// wasmtime/runtime/store.rs
1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
22//! reexported to the outside world to be usable from this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//! Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//! intended to be consumed by the outside world. Note that the "just a
39//! pointer large" is a load-bearing implementation detail in Wasmtime. This
40//! enables it to store a pointer to its own trait object which doesn't need
41//! to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//! stored inside the `Box`. This is the general Rust pattern when one struct
45//! is a layer over another. The surprising part, though, is that this is
46//! further subdivided. This structure only contains things which actually
47//! need `T` itself. The downside of this structure is that it's always
48//! generic and means that code is monomorphized into consumer crates. We
49//! strive to have things be as monomorphic as possible in `wasmtime` so this
50//! type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
53//! Stored inline in the outer type the "opaque" here means that it's a
54//! "store" but it doesn't have access to the `T`. This is the primary
55//! "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//! internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//! All references of Wasm items into a `Store` are actually indices into a
60//! table in this structure, and the `StoreData` being separate makes it a bit
61//! easier to manage/define/work with. There's no real fundamental reason this
62//! is split out, although sometimes it's useful to have separate borrows into
63//! these tables than the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79use crate::instance::InstanceData;
80use crate::linker::Definition;
81use crate::module::RegisteredModuleId;
82use crate::prelude::*;
83use crate::runtime::vm::mpk::{self, ProtectionKey, ProtectionMask};
84use crate::runtime::vm::{
85 Backtrace, ExportGlobal, GcHeapAllocationIndex, GcRootsList, GcStore,
86 InstanceAllocationRequest, InstanceAllocator, InstanceHandle, ModuleRuntimeInfo,
87 OnDemandInstanceAllocator, SignalHandler, StoreBox, StorePtr, VMContext, VMFuncRef, VMGcRef,
88 VMRuntimeLimits, WasmFault,
89};
90use crate::trampoline::VMHostGlobalContext;
91use crate::type_registry::RegisteredType;
92use crate::RootSet;
93use crate::{module::ModuleRegistry, Engine, Module, Trap, Val, ValRaw};
94use crate::{Global, Instance, Memory, RootScope, Table, Uninhabited};
95use alloc::sync::Arc;
96use core::cell::UnsafeCell;
97use core::fmt;
98use core::future::Future;
99use core::marker;
100use core::mem::{self, ManuallyDrop};
101use core::num::NonZeroU64;
102use core::ops::{Deref, DerefMut};
103use core::pin::Pin;
104use core::ptr;
105use core::sync::atomic::AtomicU64;
106use core::task::{Context, Poll};
107
108mod context;
109pub use self::context::*;
110mod data;
111pub use self::data::*;
112mod func_refs;
113use func_refs::FuncRefs;
114
115/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
116///
117/// All WebAssembly instances and items will be attached to and refer to a
118/// [`Store`]. For example instances, functions, globals, and tables are all
119/// attached to a [`Store`]. Instances are created by instantiating a
120/// [`Module`](crate::Module) within a [`Store`].
121///
122/// A [`Store`] is intended to be a short-lived object in a program. No form
123/// of GC is implemented at this time so once an instance is created within a
124/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
125/// This makes [`Store`] unsuitable for creating an unbounded number of
126/// instances in it because [`Store`] will never release this memory. It's
127/// recommended to have a [`Store`] correspond roughly to the lifetime of a
128/// "main instance" that an embedding is interested in executing.
129///
130/// ## Type parameter `T`
131///
132/// Each [`Store`] has a type parameter `T` associated with it. This `T`
133/// represents state defined by the host. This state will be accessible through
134/// the [`Caller`](crate::Caller) type that host-defined functions get access
135/// to. This `T` is suitable for storing `Store`-specific information which
136/// imported functions may want access to.
137///
138/// The data `T` can be accessed through methods like [`Store::data`] and
139/// [`Store::data_mut`].
140///
141/// ## Stores, contexts, oh my
142///
143/// Most methods in Wasmtime take something of the form
144/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
145/// the first argument. These two traits allow ergonomically passing in the
146/// context you currently have to any method. The primary two sources of
147/// contexts are:
148///
149/// * `Store<T>`
150/// * `Caller<'_, T>`
151///
152/// corresponding to what you create and what you have access to in a host
153/// function. You can also explicitly acquire a [`StoreContext`] or
154/// [`StoreContextMut`] and pass that around as well.
155///
156/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
157/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
158/// form of context you have you can call various methods, create objects, etc.
159///
160/// ## Stores and `Default`
161///
162/// You can create a store with default configuration settings using
163/// `Store::default()`. This will create a brand new [`Engine`] with default
164/// configuration (see [`Config`](crate::Config) for more information).
165///
166/// ## Cross-store usage of items
167///
168/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
169/// [`Store`]. The store they belong to is the one they were created with
170/// (passed in as a parameter) or instantiated with. This store is the only
171/// store that can be used to interact with wasm items after they're created.
172///
173/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
174/// operations is incorrect. In other words it's considered a programmer error
175/// rather than a recoverable error for the wrong [`Store`] to be used when
176/// calling APIs.
pub struct Store<T> {
    // The store's contents live on the heap behind a `Box` so that `Store<T>`
    // itself stays one pointer in size (see the module docs above). This is
    // `ManuallyDrop` because `T` and the inner contents have their destructors
    // run manually; for details see `Store::into_data`.
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
181
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
#[derive(Copy, Clone, Debug)]
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}

impl CallHook {
    /// Indicates the VM is entering host code (exiting WebAssembly code)
    pub fn entering_host(&self) -> bool {
        // Host code is entered both when wasm calls out to a host function
        // and when a wasm function returns back to its host caller.
        matches!(self, CallHook::ReturningFromWasm | CallHook::CallingHost)
    }

    /// Indicates the VM is exiting host code (entering WebAssembly code)
    pub fn exiting_host(&self) -> bool {
        // Host code is exited both when the host invokes a wasm function and
        // when a host function returns back into wasm.
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
212
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Accessor, registered via `Store::limiter`/`Store::limiter_async`,
    /// which projects a resource limiter out of `data`.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Hook invoked on host/wasm call transitions, registered via
    /// `Store::call_hook`/`Store::call_hook_async`.
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine epoch reaches this store's deadline;
    /// its return value determines how execution proceeds.
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,
    // The host-provided `T`. `ManuallyDrop` because its destructor is run
    // explicitly; for details see `Store::into_data`.
    data: ManuallyDrop<T>,
}
229
/// Which flavor of resource limiter was configured on a store: the
/// synchronous [`Store::limiter`] variant or the `async`
/// [`Store::limiter_async`] variant.
///
/// Each variant holds the host-provided accessor that projects the limiter
/// out of the store's `T` data.
enum ResourceLimiterInner<T> {
    Sync(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync>),
    #[cfg(feature = "async")]
    Async(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
235
/// An object that can take callbacks when the runtime enters or exits hostcalls.
#[cfg(all(feature = "async", feature = "call-hook"))]
#[async_trait::async_trait]
pub trait CallHookHandler<T>: Send {
    /// A callback to run when wasmtime is about to enter a host call, or when about to
    /// exit the hostcall.
    ///
    /// The `ch` argument indicates which of the four host/wasm transitions is
    /// occurring; see [`CallHook`].
    async fn handle_call_event(&self, t: StoreContextMut<'_, T>, ch: CallHook) -> Result<()>;
}
244
/// Storage for a store's configured call hook, either the synchronous or the
/// asynchronous flavor depending on which API registered it.
enum CallHookInner<T> {
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Dummy variant which keeps the `T` parameter "used" even when the hook
    /// features above are compiled out; `Uninhabited` guarantees it can never
    /// actually be constructed.
    #[allow(dead_code)]
    ForceTypeParameterToBeUsed {
        uninhabited: Uninhabited,
        _marker: marker::PhantomData<T>,
    },
}
256
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
pub enum UpdateDeadline {
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
}
268
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`. This
// lets internal code take `&StoreInner<T>`/`&mut StoreInner<T>` and call the
// monomorphic `StoreOpaque` APIs without spelling out `.inner` everywhere.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
282
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store is associated with, set at construction.
    engine: Engine,
    /// Runtime limits shared with executing wasm; see `VMRuntimeLimits`.
    runtime_limits: VMRuntimeLimits,
    /// All instances allocated within this store. Per the `Store` docs these
    /// are only deallocated when the store itself is dropped.
    instances: Vec<StoreInstance>,
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    /// Custom signal handler, if one was installed for this store.
    signal_handler: Option<Box<SignalHandler<'static>>>,
    /// Registry of modules that have been used with this store.
    modules: ModuleRegistry,
    func_refs: FuncRefs,
    host_globals: Vec<StoreBox<VMHostGlobalContext>>,

    // GC-related fields.
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    gc_host_alloc_types: hashbrown::HashSet<RegisteredType>,

    // Numbers of resources instantiated in this store, and their limits
    // (the limits default to `crate::DEFAULT_*_LIMIT` and may be overridden
    // by a configured resource limiter).
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    #[cfg(feature = "async")]
    async_state: AsyncState,
    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    ///
    /// Note that this is `ManuallyDrop` because it needs to be dropped before
    /// `rooted_host_funcs` below. This structure contains pointers which are
    /// otherwise kept alive by the `Arc` references in `rooted_host_funcs`.
    store_data: ManuallyDrop<StoreData>,
    /// The "default callee" instance allocated in `Store::new`, used as the
    /// non-null `callee` vmctx when `Func::call` has no real callee.
    default_caller: InstanceHandle,

    /// Used to optimized wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// A list of lists of definitions which have been used to instantiate
    /// within this `Store`.
    ///
    /// Note that not all instantiations end up pushing to this list. At the
    /// time of this writing only the `InstancePre<T>` type will push to this
    /// list. Pushes to this list are typically accompanied with
    /// `HostFunc::to_func_store_rooted` to clone an `Arc` here once which
    /// preserves a strong reference to the `Arc` for each `HostFunc` stored
    /// within the list of `Definition`s.
    ///
    /// Note that this is `ManuallyDrop` as it must be dropped after
    /// `store_data` above, where the function pointers are stored.
    rooted_host_funcs: ManuallyDrop<Vec<Arc<[Definition]>>>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: crate::runtime::vm::component::ResourceTable,
    #[cfg(feature = "component-model")]
    component_calls: crate::runtime::vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,
}
391
/// Per-store state used by the fiber-based async implementation.
///
/// Both fields are raw-pointer slots (initialized to null in `Store::new`)
/// that are written and read elsewhere in this module; see their usage sites
/// for the precise protocol.
#[cfg(feature = "async")]
struct AsyncState {
    /// Slot holding a pointer to the current fiber suspend handle, if any.
    current_suspend: UnsafeCell<*mut wasmtime_fiber::Suspend<Result<()>, (), Result<()>>>,
    /// Slot holding a pointer to the `Context` of the future currently
    /// polling this store, if any.
    current_poll_cx: UnsafeCell<*mut Context<'static>>,
}
397
// Lots of pesky unsafe cells and pointers in this structure. This means we need
// to declare explicitly that we use this in a threadsafe fashion.
//
// NOTE(review): the soundness of these impls rests on the usage protocol of
// the pointer slots above, which is enforced at their usage sites rather
// than by the type system here.
#[cfg(feature = "async")]
unsafe impl Send for AsyncState {}
#[cfg(feature = "async")]
unsafe impl Sync for AsyncState {}
404
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap (if any) has been placed in a no-GC scope.
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered; `false` when the `gc`
    /// feature is disabled or the store has no GC heap yet.
    entered: bool,
}
411
412impl<'a> AutoAssertNoGc<'a> {
413 #[inline]
414 pub fn new(store: &'a mut StoreOpaque) -> Self {
415 let entered = if !cfg!(feature = "gc") {
416 false
417 } else if let Some(gc_store) = store.gc_store.as_mut() {
418 gc_store.gc_heap.enter_no_gc_scope();
419 true
420 } else {
421 false
422 };
423
424 AutoAssertNoGc { store, entered }
425 }
426
427 /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
428 /// disables checks for no GC happening for the duration of this value.
429 ///
430 /// This is used when it is statically otherwise known that a GC doesn't
431 /// happen for the various types involved.
432 ///
433 /// # Unsafety
434 ///
435 /// This method is `unsafe` as it does not provide the same safety
436 /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
437 /// caller that a GC doesn't happen.
438 #[inline]
439 pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
440 if cfg!(debug_assertions) {
441 AutoAssertNoGc::new(store)
442 } else {
443 AutoAssertNoGc {
444 store,
445 entered: false,
446 }
447 }
448 }
449}
450
// `AutoAssertNoGc` transparently dereferences to the underlying
// `StoreOpaque` so that store methods remain usable while the no-GC scope
// is held.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // Reborrow the `&mut StoreOpaque` immutably.
        &*self.store
    }
}

impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}
466
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Exit the no-GC scope entered in `AutoAssertNoGc::new`, if one was
        // entered. `unwrap_gc_store_mut` is safe here because `entered` is
        // only set when a GC store was present at construction.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
475
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// Handle to the underlying runtime instance.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
484
/// Distinguishes real, user-visible instances from internal dummy instances.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
501
502impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        // Reserve a protection key for this store, if the allocator has any
        // available, so the right pages can be enabled when entering guest
        // code (see the `pkey` field of `StoreOpaque`).
        let pkey = engine.allocator().next_available_pkey();

        let mut inner = Box::new(StoreInner {
            inner: StoreOpaque {
                _marker: marker::PhantomPinned,
                engine: engine.clone(),
                runtime_limits: Default::default(),
                instances: Vec::new(),
                #[cfg(feature = "component-model")]
                num_component_instances: 0,
                signal_handler: None,
                gc_store: None,
                gc_roots: RootSet::default(),
                gc_roots_list: GcRootsList::default(),
                gc_host_alloc_types: hashbrown::HashSet::default(),
                modules: ModuleRegistry::default(),
                func_refs: FuncRefs::default(),
                host_globals: Vec::new(),
                instance_count: 0,
                instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
                memory_count: 0,
                memory_limit: crate::DEFAULT_MEMORY_LIMIT,
                table_count: 0,
                table_limit: crate::DEFAULT_TABLE_LIMIT,
                #[cfg(feature = "async")]
                async_state: AsyncState {
                    // Both async pointer slots start out null and are filled
                    // in when a fiber/future is actually involved.
                    current_suspend: UnsafeCell::new(ptr::null_mut()),
                    current_poll_cx: UnsafeCell::new(ptr::null_mut()),
                },
                fuel_reserve: 0,
                fuel_yield_interval: None,
                store_data: ManuallyDrop::new(StoreData::new()),
                // Placeholder; replaced with a real instance just below.
                default_caller: InstanceHandle::null(),
                hostcall_val_storage: Vec::new(),
                wasm_val_raw_storage: Vec::new(),
                rooted_host_funcs: ManuallyDrop::new(Vec::new()),
                pkey,
                #[cfg(feature = "component-model")]
                component_host_table: Default::default(),
                #[cfg(feature = "component-model")]
                component_calls: Default::default(),
                #[cfg(feature = "component-model")]
                host_resource_data: Default::default(),
            },
            limiter: None,
            call_hook: None,
            epoch_deadline_behavior: None,
            data: ManuallyDrop::new(data),
        });

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        inner.default_caller = {
            // An empty shim module backs the dummy instance.
            let module = Arc::new(wasmtime_environ::Module::default());
            let shim = ModuleRuntimeInfo::bare(module);
            let allocator = OnDemandInstanceAllocator::default();
            allocator
                .validate_module(shim.module(), shim.offsets())
                .unwrap();
            let mut instance = unsafe {
                allocator
                    .allocate_module(InstanceAllocationRequest {
                        host_state: Box::new(()),
                        imports: Default::default(),
                        store: StorePtr::empty(),
                        runtime_info: &shim,
                        wmemcheck: engine.config().wmemcheck,
                        pkey: None,
                    })
                    .expect("failed to allocate default callee")
            };

            // Note the erasure of the lifetime here into `'static`, so in
            // general usage of this trait object must be strictly bounded to
            // the `Store` itself, and is an invariant that we have to maintain
            // throughout Wasmtime.
            unsafe {
                let traitobj = mem::transmute::<
                    *mut (dyn crate::runtime::vm::Store + '_),
                    *mut (dyn crate::runtime::vm::Store + 'static),
                >(&mut *inner);
                instance.set_store(traitobj);
            }
            instance
        };

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
608
    /// Access the underlying data owned by this `Store`.
    ///
    /// This is the same `T` originally passed to [`Store::new`].
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }
614
    /// Access the underlying data owned by this `Store`.
    ///
    /// Mutable counterpart of [`Store::data`].
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        self.inner.data_mut()
    }
620
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    pub fn into_data(mut self) -> T {
        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            // Take ownership of the boxed inner contents, then forget `self`
            // so `Drop for Store<T>` never runs; finally move `T` out of its
            // `ManuallyDrop` shell. `inner`'s own drop glue cleans up the
            // rest when it goes out of scope.
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data)
        }
    }
653
654 /// Configures the [`ResourceLimiter`] used to limit resource creation
655 /// within this [`Store`].
656 ///
657 /// Whenever resources such as linear memory, tables, or instances are
658 /// allocated the `limiter` specified here is invoked with the store's data
659 /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
660 /// being allocated. The returned [`ResourceLimiter`] is intended to live
661 /// within the `T` itself, for example by storing a
662 /// [`StoreLimits`](crate::StoreLimits).
663 ///
664 /// Note that this limiter is only used to limit the creation/growth of
665 /// resources in the future, this does not retroactively attempt to apply
666 /// limits to the [`Store`].
667 ///
668 /// # Examples
669 ///
670 /// ```
671 /// use wasmtime::*;
672 ///
673 /// struct MyApplicationState {
674 /// my_state: u32,
675 /// limits: StoreLimits,
676 /// }
677 ///
678 /// let engine = Engine::default();
679 /// let my_state = MyApplicationState {
680 /// my_state: 42,
681 /// limits: StoreLimitsBuilder::new()
682 /// .memory_size(1 << 20 /* 1 MB */)
683 /// .instances(2)
684 /// .build(),
685 /// };
686 /// let mut store = Store::new(&engine, my_state);
687 /// store.limiter(|state| &mut state.limits);
688 ///
689 /// // Creation of smaller memories is allowed
690 /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
691 ///
692 /// // Creation of a larger memory, however, will exceed the 1MB limit we've
693 /// // configured
694 /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
695 ///
696 /// // The number of instances in this store is limited to 2, so the third
697 /// // instance here should fail.
698 /// let module = Module::new(&engine, "(module)").unwrap();
699 /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
700 /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
701 /// assert!(Instance::new(&mut store, &module, &[]).is_err());
702 /// ```
703 ///
704 /// [`ResourceLimiter`]: crate::ResourceLimiter
705 pub fn limiter(
706 &mut self,
707 mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync + 'static,
708 ) {
709 // Apply the limits on instances, tables, and memory given by the limiter:
710 let inner = &mut self.inner;
711 let (instance_limit, table_limit, memory_limit) = {
712 let l = limiter(&mut inner.data);
713 (l.instances(), l.tables(), l.memories())
714 };
715 let innermost = &mut inner.inner;
716 innermost.instance_limit = instance_limit;
717 innermost.table_limit = table_limit;
718 innermost.memory_limit = memory_limit;
719
720 // Save the limiter accessor function:
721 inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
722 }
723
724 /// Configures the [`ResourceLimiterAsync`](crate::ResourceLimiterAsync)
725 /// used to limit resource creation within this [`Store`].
726 ///
727 /// This method is an asynchronous variant of the [`Store::limiter`] method
728 /// where the embedder can block the wasm request for more resources with
729 /// host `async` execution of futures.
730 ///
731 /// By using a [`ResourceLimiterAsync`](`crate::ResourceLimiterAsync`)
732 /// with a [`Store`], you can no longer use
733 /// [`Memory::new`](`crate::Memory::new`),
734 /// [`Memory::grow`](`crate::Memory::grow`),
735 /// [`Table::new`](`crate::Table::new`), and
736 /// [`Table::grow`](`crate::Table::grow`). Instead, you must use their
737 /// `async` variants: [`Memory::new_async`](`crate::Memory::new_async`),
738 /// [`Memory::grow_async`](`crate::Memory::grow_async`),
739 /// [`Table::new_async`](`crate::Table::new_async`), and
740 /// [`Table::grow_async`](`crate::Table::grow_async`).
741 ///
742 /// Note that this limiter is only used to limit the creation/growth of
743 /// resources in the future, this does not retroactively attempt to apply
744 /// limits to the [`Store`]. Additionally this must be used with an async
745 /// [`Store`] configured via
746 /// [`Config::async_support`](crate::Config::async_support).
747 #[cfg(feature = "async")]
748 pub fn limiter_async(
749 &mut self,
750 mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync)
751 + Send
752 + Sync
753 + 'static,
754 ) {
755 debug_assert!(self.inner.async_support());
756 // Apply the limits on instances, tables, and memory given by the limiter:
757 let inner = &mut self.inner;
758 let (instance_limit, table_limit, memory_limit) = {
759 let l = limiter(&mut inner.data);
760 (l.instances(), l.tables(), l.memories())
761 };
762 let innermost = &mut inner.inner;
763 innermost.instance_limit = instance_limit;
764 innermost.table_limit = table_limit;
765 innermost.memory_limit = memory_limit;
766
767 // Save the limiter accessor function:
768 inner.limiter = Some(ResourceLimiterInner::Async(Box::new(limiter)));
769 }
770
    /// Configures an async function that runs on calls and returns between
    /// WebAssembly and host code. For the non-async equivalent of this method,
    /// see [`Store::call_hook`].
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function's future may return a [`Trap`]. If a trap is returned
    /// when an import was called, it is immediately raised as-if the host
    /// import had returned the trap. If a trap is returned after wasm returns
    /// to the host then the wasm function's result is ignored and this trap is
    /// returned instead.
    ///
    /// After this function returns a trap, it may be called for subsequent
    /// returns to host or wasm code as the trap propagates to the root call.
    ///
    /// This method is only available when both the `async` and `call-hook`
    /// Cargo features are enabled.
    #[cfg(all(feature = "async", feature = "call-hook"))]
    pub fn call_hook_async(&mut self, hook: impl CallHookHandler<T> + Send + Sync + 'static) {
        self.inner.call_hook = Some(CallHookInner::Async(Box::new(hook)));
    }
790
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    ///
    /// This method is only available when the `call-hook` Cargo feature is
    /// enabled.
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }
812
    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        // Delegates to the engine stored within the internal store state.
        self.inner.engine()
    }
817
    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// For async stores see [`Store::gc_async`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        self.inner.gc()
    }
829
    /// Perform garbage collection asynchronously.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// This method is only available when both the `async` and `gc` Cargo
    /// features are enabled.
    #[cfg(all(feature = "async", feature = "gc"))]
    pub async fn gc_async(&mut self)
    where
        T: Send,
    {
        self.inner.gc_async().await;
    }
844
    /// Returns the amount of fuel in this [`Store`]. When fuel is enabled, it
    /// must be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        self.inner.get_fuel()
    }
855
    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// See [`Store::get_fuel`] to read back the remaining amount.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.inner.set_fuel(fuel)
    }
878
    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back to the
    /// caller every `interval` units of fuel consumed. This is only suitable with use of
    /// a store associated with an [async config](crate::Config::async_support) because
    /// only then are futures used and yields are possible.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    ///
    /// # Errors
    ///
    /// This method will error if it is not called on a store associated with an [async
    /// config](crate::Config::async_support).
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.inner.fuel_async_yield_interval(interval)
    }
910
    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`](crate::Engine::increment_epoch) has been
    /// invoked at least `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }
940
    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// This behavior is the default if the store is not otherwise
    /// configured via
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    pub fn epoch_deadline_trap(&mut self) {
        self.inner.epoch_deadline_trap();
    }
970
    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] to yield to the async executor before
    /// updating the epoch deadline. Alternatively, the callback may
    /// return [`UpdateDeadline::Continue`] to update the epoch deadline
    /// immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        // Box and store the callback in the internal store state.
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
1004
    /// Configures epoch-deadline expiration to yield to the async
    /// caller and then update the deadline.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will yield
    /// (the future will return `Pending` but re-awake itself for
    /// later execution) and, upon resuming, the store will be
    /// configured with an epoch deadline equal to the current epoch
    /// plus `delta` ticks.
    ///
    /// This setting is intended to allow for cooperative timeslicing
    /// of multiple CPU-bound Wasm guests in different stores, all
    /// executing under the control of an async executor. To drive
    /// this, stores should be configured to "yield and update"
    /// automatically with this function, and some external driver (a
    /// thread that wakes up periodically, or a timer
    /// signal/interrupt) should call
    /// [`Engine::increment_epoch()`](crate::Engine::increment_epoch).
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    ///
    /// This method is only available when the `async` Cargo feature is
    /// enabled.
    #[cfg(feature = "async")]
    pub fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        self.inner.epoch_deadline_async_yield_and_update(delta);
    }
1032}
1033
impl<'a, T> StoreContext<'a, T> {
    // Returns whether the underlying store is configured for async support.
    pub(crate) fn async_support(&self) -> bool {
        self.0.async_support()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Note that the returned reference lives for `'a`, the full lifetime of
    /// this context, not just the duration of the borrow of `self`.
    pub fn data(&self) -> &'a T {
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
1058
impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        self.0.gc()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when both the `async` and `gc` Cargo
    /// features are enabled.
    #[cfg(all(feature = "async", feature = "gc"))]
    pub async fn gc_async(&mut self)
    where
        T: Send,
    {
        self.0.gc_async().await;
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }

    /// Configures epoch-deadline expiration to yield to the async caller and
    /// then update the deadline.
    ///
    /// For more information see
    /// [`Store::epoch_deadline_async_yield_and_update`].
    #[cfg(feature = "async")]
    pub fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        self.0.epoch_deadline_async_yield_and_update(delta);
    }
}
1147
impl<T> StoreInner<T> {
    // Shared access to the `T` of user data owned by this store.
    #[inline]
    fn data(&self) -> &T {
        &self.data
    }

    // Mutable access to the `T` of user data owned by this store.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        &mut self.data
    }

    // Invoked on each wasm<->host state transition `s`. Kept `#[inline]` with
    // a cheap fast path so the common case (no pkey, no hook) stays on the
    // caller's hot path.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        // Fast path: nothing is configured, so there's nothing to do.
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    // Out-of-line path handling memory-protection-key switching and/or the
    // user-configured call hook.
    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        // When a protection key is configured, restrict the allocator to it
        // while entering wasm and relax it again when returning to the host.
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        #[cfg_attr(not(feature = "call-hook"), allow(unreachable_patterns))]
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            // Restore the hook even when it returned an error so that it's
            // still invoked for subsequent transitions.
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    // Dispatches to the sync or async flavor of the configured call hook.
    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => unsafe {
                // Async hooks require an async context to block on the
                // handler's future; its absence is reported as an error.
                self.inner
                    .async_cx()
                    .ok_or_else(|| anyhow!("couldn't grab async_cx for call hook"))?
                    .block_on(
                        handler
                            .handle_call_event((&mut *self).as_context_mut(), s)
                            .as_mut(),
                    )?
            },

            // Uninhabited variant exists only to use the `T` type parameter;
            // this arm can never execute.
            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                match *uninhabited {}
            }
        }
    }
}
1215
/// Computes the total fuel remaining: the still-active injected fuel plus the
/// reserve.
///
/// The VM counts fuel *upwards* from a negative `injected_fuel` value (see
/// `set_fuel` below), so the remaining active fuel is `-injected_fuel`.
/// `saturating_neg` is used instead of the unary `-` so that the (unlikely)
/// `i64::MIN` input saturates rather than overflowing, which would panic in
/// debug builds; `saturating_add_signed` then clamps the sum to `u64` range,
/// returning 0 once more fuel has been consumed than was injected.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    fuel_reserve.saturating_add_signed(injected_fuel.saturating_neg())
}
1219
1220// Add remaining fuel from the reserve into the active fuel if there is any left.
1221fn refuel(
1222 injected_fuel: &mut i64,
1223 fuel_reserve: &mut u64,
1224 yield_interval: Option<NonZeroU64>,
1225) -> bool {
1226 let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1227 if fuel > 0 {
1228 set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1229 true
1230 } else {
1231 false
1232 }
1233}
1234
// Splits `new_fuel_amount` between the VM's active fuel counter and the
// store's reserve.
//
// The VM's counter is an `i64` that counts upwards and halts execution once it
// turns positive, so the active portion is written as a negative value. When a
// yield interval is configured, at most one interval's worth of fuel is handed
// to the VM at a time; the remainder is parked in `fuel_reserve` for later
// re-injection by `refuel`.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // No interval means "inject everything at once" (modulo the i64 cap).
    let per_injection = yield_interval.map_or(u64::MAX, |i| i.get());
    // A single injection is limited by both the yield interval and the i64
    // range of the VM's counter.
    let injected = new_fuel_amount.min(per_injection).min(i64::MAX as u64);
    // Whatever wasn't injected goes into the reserve for later.
    *fuel_reserve = new_fuel_amount - injected;
    // Negative value: the VM increments toward zero as fuel is consumed.
    *injected_fuel = -(injected as i64);
}
1254
1255#[doc(hidden)]
1256impl StoreOpaque {
    /// Returns the [`StoreId`] associated with this store's data storage.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1260
    /// Charges the instantiation of `module` against this store's configured
    /// instance/memory/table limits, returning an error if any limit would be
    /// exceeded.
    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
        // Saturating-add `amt` to `slot`, erroring when the result exceeds `max`.
        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
            let new = slot.saturating_add(amt);
            if new > max {
                bail!(
                    "resource limit exceeded: {} count too high at {}",
                    desc,
                    new
                );
            }
            *slot = new;
            Ok(())
        }

        let module = module.env_module();
        // Only memories/tables defined by this module count; imported ones
        // were already charged to whichever instance defined them.
        let memories = module.memory_plans.len() - module.num_imported_memories;
        let tables = module.table_plans.len() - module.num_imported_tables;

        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
        bump(
            &mut self.memory_count,
            self.memory_limit,
            memories,
            "memory",
        )?;
        bump(&mut self.table_count, self.table_limit, tables, "table")?;

        Ok(())
    }
1290
    /// Returns whether this store's engine was configured for async support
    /// (always `false` when the `async` feature is compiled out).
    #[inline]
    pub fn async_support(&self) -> bool {
        cfg!(feature = "async") && self.engine().config().async_support
    }
1295
    /// Returns the [`Engine`] this store is associated with.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }
1300
    /// Shared access to this store's internal [`StoreData`] storage.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }
1305
    /// Mutable access to this store's internal [`StoreData`] storage.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }
1310
    /// Shared access to the registry of modules used within this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }
1315
    /// Mutable access to the registry of modules used within this store.
    #[inline]
    pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
        &mut self.modules
    }
1320
    /// Mutable access to this store's collection of function references.
    pub(crate) fn func_refs(&mut self) -> &mut FuncRefs {
        &mut self.func_refs
    }
1324
    /// Fills this store's pending function references, resolving them against
    /// the store's module registry.
    pub(crate) fn fill_func_refs(&mut self) {
        self.func_refs.fill(&mut self.modules);
    }
1328
    /// Pushes a shared set of `VMFuncRef`s (from an instance-pre) into this
    /// store's collection of function references.
    pub(crate) fn push_instance_pre_func_refs(&mut self, func_refs: Arc<[VMFuncRef]>) {
        self.func_refs.push_instance_pre_func_refs(func_refs);
    }
1332
    /// Mutable access to the list of host-created globals owned by this store.
    pub(crate) fn host_globals(&mut self) -> &mut Vec<StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1336
    /// Returns the [`Module`] that `instance` was instantiated from, or `None`
    /// for dummy instances, which have no associated module.
    pub fn module_for_instance(&self, instance: InstanceId) -> Option<&'_ Module> {
        match self.instances[instance.0].kind {
            StoreInstanceKind::Dummy => None,
            StoreInstanceKind::Real { module_id } => {
                let module = self
                    .modules()
                    .lookup_module_by_id(module_id)
                    .expect("should always have a registered module for real instances");
                Some(module)
            }
        }
    }
1349
    /// Registers `handle` as a real instance of the module identified by
    /// `module_id` within this store, returning the id it's stored under.
    ///
    /// # Safety
    ///
    /// NOTE(review): inherited unsafe contract — presumably `handle` must have
    /// been allocated for this store/engine; confirm against callers.
    pub unsafe fn add_instance(
        &mut self,
        handle: InstanceHandle,
        module_id: RegisteredModuleId,
    ) -> InstanceId {
        self.instances.push(StoreInstance {
            handle: handle.clone(),
            kind: StoreInstanceKind::Real { module_id },
        });
        // The id is the index of the entry just pushed.
        InstanceId(self.instances.len() - 1)
    }
1361
    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    pub unsafe fn add_dummy_instance(&mut self, handle: InstanceHandle) -> InstanceId {
        // Dummy instances are filtered out of public iteration such as
        // `all_instances` below.
        self.instances.push(StoreInstance {
            handle: handle.clone(),
            kind: StoreInstanceKind::Dummy,
        });
        InstanceId(self.instances.len() - 1)
    }
1374
    /// Shared access to the instance handle registered under `id`.
    pub fn instance(&self, id: InstanceId) -> &InstanceHandle {
        &self.instances[id.0].handle
    }
1378
    /// Mutable access to the instance handle registered under `id`.
    pub fn instance_mut(&mut self, id: InstanceId) -> &mut InstanceHandle {
        &mut self.instances[id.0].handle
    }
1382
    /// Get all instances (ignoring dummy instances) within this store.
    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
        // Collect the instance data up front: constructing `Instance` values
        // below requires `self` again, which can't happen while
        // `self.instances` is still borrowed by the iteration.
        let instances = self
            .instances
            .iter()
            .enumerate()
            .filter_map(|(idx, inst)| {
                let id = InstanceId::from_index(idx);
                // Dummy instances are implementation details and are skipped.
                if let StoreInstanceKind::Dummy = inst.kind {
                    None
                } else {
                    Some(InstanceData::from_id(id))
                }
            })
            .collect::<Vec<_>>();
        instances
            .into_iter()
            .map(|i| Instance::from_wasmtime(i, self))
    }
1402
    /// Get all memories (host- or Wasm-defined) within this store.
    pub fn all_memories<'a>(&'a mut self) -> impl Iterator<Item = Memory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        //
        // Collect first so `self` is free to be reborrowed when constructing
        // the `Memory` values below.
        let mems = self
            .instances
            .iter_mut()
            .flat_map(|instance| instance.handle.defined_memories())
            .collect::<Vec<_>>();
        mems.into_iter()
            .map(|memory| unsafe { Memory::from_wasmtime_memory(memory, self) })
    }
1416
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.

        // Guard that temporarily moves `self.instances` out of the store so
        // that the callback can be handed `&mut StoreOpaque` while we iterate
        // the instances, restoring them on drop (including on unwind).
        struct TempTakeInstances<'a> {
            instances: Vec<StoreInstance>,
            store: &'a mut StoreOpaque,
        }

        impl<'a> TempTakeInstances<'a> {
            fn new(store: &'a mut StoreOpaque) -> Self {
                let instances = mem::take(&mut store.instances);
                Self { instances, store }
            }
        }

        impl Drop for TempTakeInstances<'_> {
            fn drop(&mut self) {
                assert!(self.store.instances.is_empty());
                self.store.instances = mem::take(&mut self.instances);
            }
        }

        let mut temp = TempTakeInstances::new(self);
        for instance in temp.instances.iter_mut() {
            for table in instance.handle.defined_tables() {
                let table = unsafe { Table::from_wasmtime_table(table, temp.store) };
                f(temp.store, table);
            }
        }
    }
1450
    /// Iterate over all globals (host- or Wasm-defined) within this store.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // Guard that temporarily moves both the host-created globals and the
        // instances out of the store so the callback can be handed
        // `&mut StoreOpaque`, restoring both on drop (including on unwind).
        struct TempTakeHostGlobalsAndInstances<'a> {
            host_globals: Vec<StoreBox<VMHostGlobalContext>>,
            instances: Vec<StoreInstance>,
            store: &'a mut StoreOpaque,
        }

        impl<'a> TempTakeHostGlobalsAndInstances<'a> {
            fn new(store: &'a mut StoreOpaque) -> Self {
                let host_globals = mem::take(&mut store.host_globals);
                let instances = mem::take(&mut store.instances);
                Self {
                    host_globals,
                    instances,
                    store,
                }
            }
        }

        impl Drop for TempTakeHostGlobalsAndInstances<'_> {
            fn drop(&mut self) {
                assert!(self.store.host_globals.is_empty());
                self.store.host_globals = mem::take(&mut self.host_globals);
                assert!(self.store.instances.is_empty());
                self.store.instances = mem::take(&mut self.instances);
            }
        }

        let mut temp = TempTakeHostGlobalsAndInstances::new(self);
        // SAFETY(review): raw pointers into the `StoreBox` host-global storage
        // back the `ExportGlobal`s below; `temp` keeps that storage alive for
        // the duration of the loop — confirm `StoreBox`'s stable-address
        // guarantee.
        unsafe {
            // First enumerate all the host-created globals.
            for global in temp.host_globals.iter() {
                let export = ExportGlobal {
                    definition: &mut (*global.get()).global as *mut _,
                    vmctx: core::ptr::null_mut(),
                    global: (*global.get()).ty.to_wasm_type(),
                };
                let global = Global::from_wasmtime_global(export, temp.store);
                f(temp.store, global);
            }

            // Then enumerate all instances' defined globals.
            for instance in temp.instances.iter_mut() {
                for (_, export) in instance.handle.defined_globals() {
                    let global = Global::from_wasmtime_global(export, temp.store);
                    f(temp.store, global);
                }
            }
        }
    }
1502
    /// Installs (or clears, when `handler` is `None`) this store's custom
    /// signal handler.
    #[cfg_attr(not(target_os = "linux"), allow(dead_code))] // not used on all platforms
    pub fn set_signal_handler(&mut self, handler: Option<Box<SignalHandler<'static>>>) {
        self.signal_handler = handler;
    }
1507
    /// Shared access to this store's `VMRuntimeLimits`.
    #[inline]
    pub fn runtime_limits(&self) -> &VMRuntimeLimits {
        &self.runtime_limits
    }
1512
    /// Allocates and installs this store's GC heap.
    ///
    /// Must only be called when no GC store has been allocated yet; this is
    /// the out-of-line slow path behind `gc_store_mut`'s lazy allocation.
    #[inline(never)]
    pub(crate) fn allocate_gc_heap(&mut self) -> Result<()> {
        assert!(self.gc_store.is_none());
        let gc_store = allocate_gc_store(self.engine())?;
        self.gc_store = Some(gc_store);
        return Ok(());

        // With the `gc` feature enabled: allocate a real heap from the
        // engine's allocator when GC types are in use, otherwise fall back to
        // a disabled placeholder heap.
        #[cfg(feature = "gc")]
        fn allocate_gc_store(engine: &Engine) -> Result<GcStore> {
            let (index, heap) = if engine.features().gc_types() {
                engine
                    .allocator()
                    .allocate_gc_heap(&**engine.gc_runtime())?
            } else {
                (
                    GcHeapAllocationIndex::default(),
                    crate::runtime::vm::disabled_gc_heap(),
                )
            };
            Ok(GcStore::new(index, heap))
        }

        // Without the `gc` feature there's only the disabled placeholder heap.
        #[cfg(not(feature = "gc"))]
        fn allocate_gc_store(_engine: &Engine) -> Result<GcStore> {
            Ok(GcStore::new(
                GcHeapAllocationIndex::default(),
                crate::runtime::vm::disabled_gc_heap(),
            ))
        }
    }
1543
    /// Shared access to this store's GC store, failing if the GC heap has not
    /// been allocated yet.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_store(&self) -> Result<&GcStore> {
        match &self.gc_store {
            Some(gc_store) => Ok(gc_store),
            None => bail!("GC heap not initialized yet"),
        }
    }
1552
    /// Mutable access to this store's GC store, lazily allocating the GC heap
    /// on first use.
    #[inline]
    pub(crate) fn gc_store_mut(&mut self) -> Result<&mut GcStore> {
        if self.gc_store.is_none() {
            self.allocate_gc_heap()?;
        }
        Ok(self.unwrap_gc_store_mut())
    }
1560
    /// Shared access to this store's GC store; panics if the GC heap has not
    /// been allocated yet.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
        self.gc_store
            .as_ref()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
1568
    /// Mutable access to this store's GC store; panics if the GC heap has not
    /// been allocated yet.
    #[inline]
    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
        self.gc_store
            .as_mut()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
1575
    /// Shared access to this store's set of GC roots.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
1580
    /// Mutable access to this store's set of GC roots.
    #[inline]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
1585
    /// Pops LIFO-rooted GC references down to the given `scope` marker.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
1590
    /// Performs a synchronous garbage collection of this store's GC heap.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots);
        // SAFETY(review): `roots` was traced immediately above and isn't
        // mutated until after the collection completes.
        self.unwrap_gc_store_mut().gc(unsafe { roots.iter() });

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;
    }
1609
    /// No-op: GC support is compiled out.
    #[inline]
    #[cfg(not(feature = "gc"))]
    pub fn gc(&mut self) {
        // Nothing to collect.
        //
        // Note that this is *not* a public method, this is just defined for the
        // crate-internal `StoreOpaque` type. This is a convenience so that we
        // don't have to `cfg` every call site.
    }
1619
    /// Finds and records all live GC roots (Wasm stack, vmctx, and
    /// user-created) into `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        self.trace_vmctx_roots(gc_roots_list);
        self.trace_user_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
1633
1634 #[cfg(all(feature = "async", feature = "gc"))]
1635 pub async fn gc_async(&mut self) {
1636 assert!(
1637 self.async_support(),
1638 "cannot use `gc_async` without enabling async support in the config",
1639 );
1640
1641 // If the GC heap hasn't been initialized, there is nothing to collect.
1642 if self.gc_store.is_none() {
1643 return;
1644 }
1645
1646 // Take the GC roots out of `self` so we can borrow it mutably but still
1647 // call mutable methods on `self`.
1648 let mut roots = std::mem::take(&mut self.gc_roots_list);
1649
1650 self.trace_roots_async(&mut roots).await;
1651 self.unwrap_gc_store_mut()
1652 .gc_async(unsafe { roots.iter() })
1653 .await;
1654
1655 // Restore the GC roots for the next GC.
1656 roots.clear();
1657 self.gc_roots_list = roots;
1658 }
1659
    /// No-op: GC support is compiled out.
    #[inline]
    #[cfg(all(feature = "async", not(feature = "gc")))]
    pub async fn gc_async(&mut self) {
        // Nothing to collect.
        //
        // Note that this is *not* a public method, this is just defined for the
        // crate-internal `StoreOpaque` type. This is a convenience so that we
        // don't have to `cfg` every call site.
    }
1669
    /// Like `trace_roots` above, but yields back to the async executor between
    /// each tracing phase.
    #[cfg(all(feature = "async", feature = "gc"))]
    async fn trace_roots_async(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Yield;

        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        Yield::new().await;
        self.trace_vmctx_roots(gc_roots_list);
        Yield::new().await;
        self.trace_user_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
1687
    /// Walks this store's Wasm stack frames, recording live GC references
    /// found via each frame's stack map into `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use core::ptr::NonNull;

        use crate::runtime::vm::{ModuleInfoLookup, SendSyncPtr};

        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self.vmruntime_limits().cast_const(), |frame| {
            let pc = frame.pc();
            debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

            let fp = frame.fp() as *mut usize;
            debug_assert!(
                !fp.is_null(),
                "we should always get a valid frame pointer for Wasm frames"
            );

            let module_info = self
                .modules()
                .lookup(pc)
                .expect("should have module info for Wasm frame");

            // A frame with no stack map for this pc holds no live GC refs.
            let stack_map = match module_info.lookup_stack_map(pc) {
                Some(sm) => sm,
                None => {
                    log::trace!("No stack map for this Wasm frame");
                    return core::ops::ControlFlow::Continue(());
                }
            };
            log::trace!(
                "We have a stack map that maps {} bytes in this Wasm frame",
                stack_map.frame_size()
            );

            // SAFETY(review): `fp` comes from the backtrace of this store's
            // own live frames, so the derived `sp` and slot reads stay within
            // the frame the stack map describes — relies on the backtrace and
            // stack-map invariants.
            let sp = unsafe { stack_map.sp(fp) };
            for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
                let raw: u32 = unsafe { core::ptr::read(stack_slot) };
                log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

                // Null/none references are not roots.
                let gc_ref = VMGcRef::from_raw_u32(raw);
                if gc_ref.is_some() {
                    unsafe {
                        gc_roots_list.add_wasm_stack_root(SendSyncPtr::new(
                            NonNull::new(stack_slot).unwrap(),
                        ));
                    }
                }
            }

            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
1743
1744 #[cfg(feature = "gc")]
1745 fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
1746 log::trace!("Begin trace GC roots :: vmctx");
1747 self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
1748 self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
1749 log::trace!("End trace GC roots :: vmctx");
1750 }
1751
    /// Records GC roots registered by the embedder (held in this store's
    /// `gc_roots` set) into `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }
1758
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive). The registration is retained for the
    /// lifetime of this store.
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: RegisteredType) {
        self.gc_host_alloc_types.insert(ty);
    }
1768
1769 /// Yields the async context, assuming that we are executing on a fiber and
1770 /// that fiber is not in the process of dying. This function will return
1771 /// None in the latter case (the fiber is dying), and panic if
1772 /// `async_support()` is false.
1773 #[cfg(feature = "async")]
1774 #[inline]
1775 pub fn async_cx(&self) -> Option<AsyncCx> {
1776 assert!(self.async_support());
1777
1778 let poll_cx_box_ptr = self.async_state.current_poll_cx.get();
1779 if poll_cx_box_ptr.is_null() {
1780 return None;
1781 }
1782
1783 let poll_cx_inner_ptr = unsafe { *poll_cx_box_ptr };
1784 if poll_cx_inner_ptr.is_null() {
1785 return None;
1786 }
1787
1788 Some(AsyncCx {
1789 current_suspend: self.async_state.current_suspend.get(),
1790 current_poll_cx: poll_cx_box_ptr,
1791 track_pkey_context_switch: self.pkey.is_some(),
1792 })
1793 }
1794
1795 pub fn get_fuel(&self) -> Result<u64> {
1796 anyhow::ensure!(
1797 self.engine().tunables().consume_fuel,
1798 "fuel is not configured in this store"
1799 );
1800 let injected_fuel = unsafe { *self.runtime_limits.fuel_consumed.get() };
1801 Ok(get_fuel(injected_fuel, self.fuel_reserve))
1802 }
1803
    /// Attempts to top the wasm-visible fuel counter back up from this store's
    /// fuel reserve, returning `true` if execution may continue and `false`
    /// if the store is truly out of fuel.
    fn refuel(&mut self) -> bool {
        // NOTE(review): `fuel_consumed` is an `UnsafeCell` shared with running
        // wasm; presumably nothing else aliases it while the host has control
        // of the store — confirm before changing how it's accessed.
        let injected_fuel = unsafe { &mut *self.runtime_limits.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }
1812
1813 pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
1814 anyhow::ensure!(
1815 self.engine().tunables().consume_fuel,
1816 "fuel is not configured in this store"
1817 );
1818 let injected_fuel = unsafe { &mut *self.runtime_limits.fuel_consumed.get() };
1819 set_fuel(
1820 injected_fuel,
1821 &mut self.fuel_reserve,
1822 self.fuel_yield_interval,
1823 fuel,
1824 );
1825 Ok(())
1826 }
1827
1828 pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1829 anyhow::ensure!(
1830 self.engine().tunables().consume_fuel,
1831 "fuel is not configured in this store"
1832 );
1833 anyhow::ensure!(
1834 self.engine().config().async_support,
1835 "async support is not configured in this store"
1836 );
1837 anyhow::ensure!(
1838 interval != Some(0),
1839 "fuel_async_yield_interval must not be 0"
1840 );
1841 self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
1842 // Reset the fuel active + reserve states by resetting the amount.
1843 self.set_fuel(self.get_fuel()?)
1844 }
1845
    /// Yields execution to the caller on out-of-gas or epoch interruption.
    ///
    /// This only works on async futures and stores, and assumes that we're
    /// executing on a fiber. This will yield execution back to the caller once.
    ///
    /// # Errors
    ///
    /// Returns an error (a trap) if the future driving this fiber was dropped
    /// while we were suspended.
    #[cfg(feature = "async")]
    fn async_yield_impl(&mut self) -> Result<()> {
        use crate::runtime::vm::Yield;

        let mut future = Yield::new();

        // When control returns, we have a `Result<()>` passed
        // in from the host fiber. If this finished successfully then
        // we were resumed normally via a `poll`, so keep going. If
        // the future was dropped while we were yielded, then we need
        // to clean up this fiber. Do so by raising a trap which will
        // abort all wasm and get caught on the other side to clean
        // things up.
        unsafe {
            // `future` is a stack local that is not moved after this point,
            // so pinning it in place here is sound.
            self.async_cx()
                .expect("attempted to pull async context during shutdown")
                .block_on(Pin::new_unchecked(&mut future))
        }
    }
1869
    /// Returns a raw pointer to this store's custom signal handler, if one
    /// was configured, and `None` otherwise.
    #[inline]
    pub fn signal_handler(&self) -> Option<*const SignalHandler<'static>> {
        let handler = self.signal_handler.as_ref()?;
        Some(&**handler as *const _)
    }
1875
    /// Returns a raw pointer to this store's `VMRuntimeLimits`.
    #[inline]
    pub fn vmruntime_limits(&self) -> *mut VMRuntimeLimits {
        // NOTE(review): this casts away shared-borrow immutability to hand out
        // a mutable raw pointer; callers are presumably responsible for
        // upholding aliasing rules when writing through it — confirm the
        // intended contract before changing this.
        &self.runtime_limits as *const VMRuntimeLimits as *mut VMRuntimeLimits
    }
1880
    /// Returns the `vmctx` of this store's always-present default-caller
    /// instance.
    #[inline]
    pub fn default_caller(&self) -> *mut VMContext {
        self.default_caller.vmctx()
    }
1885
    /// Returns the raw `vm::Store` trait-object pointer recorded in this
    /// store's default-caller instance.
    pub fn traitobj(&self) -> *mut dyn crate::runtime::vm::Store {
        self.default_caller.store()
    }
1889
1890 /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
1891 /// used as part of calling the host in a `Func::new` method invocation.
1892 #[inline]
1893 pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
1894 mem::take(&mut self.hostcall_val_storage)
1895 }
1896
1897 /// Restores the vector previously taken by `take_hostcall_val_storage`
1898 /// above back into the store, allowing it to be used in the future for the
1899 /// next wasm->host call.
1900 #[inline]
1901 pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
1902 if storage.capacity() > self.hostcall_val_storage.capacity() {
1903 self.hostcall_val_storage = storage;
1904 }
1905 }
1906
1907 /// Same as `take_hostcall_val_storage`, but for the direction of the host
1908 /// calling wasm.
1909 #[inline]
1910 pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
1911 mem::take(&mut self.wasm_val_raw_storage)
1912 }
1913
1914 /// Same as `save_hostcall_val_storage`, but for the direction of the host
1915 /// calling wasm.
1916 #[inline]
1917 pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
1918 if storage.capacity() > self.wasm_val_raw_storage.capacity() {
1919 self.wasm_val_raw_storage = storage;
1920 }
1921 }
1922
    /// Pins a set of host function definitions to this store, keeping the
    /// `Arc` alive for the store's lifetime.
    pub(crate) fn push_rooted_funcs(&mut self, funcs: Arc<[Definition]>) {
        self.rooted_host_funcs.push(funcs);
    }
1926
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for instance in self.instances.iter() {
            if let Some(f) = instance.handle.wasm_fault(addr) {
                // At most one instance should claim the faulting address.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        cfg_if::cfg_if! {
            if #[cfg(any(feature = "std", unix, windows))] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no way to
                // abort the process portably, so flag a compile time error.
                //
                // NB: if this becomes a problem in the future one option would
                // be to extend the `capi.rs` module for no_std platforms, but
                // it remains yet to be seen at this time if this is hit much.
                compile_error!("either `std` or `panic=abort` must be enabled");
                None
            }
        }
    }
2026
    /// Retrieve the store's protection key.
    ///
    /// Returns `None` when no MPK protection key was assigned to this store.
    #[inline]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
2032
    /// Borrows the three pieces of component-model resource state together,
    /// so callers can use all of them simultaneously without needing multiple
    /// `&mut self` borrows.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut crate::runtime::vm::component::CallContexts,
        &mut crate::runtime::vm::component::ResourceTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }
2048
    /// Records that a component instance was created within this store.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }
2058}
2059
impl<T> StoreContextMut<'_, T> {
    /// Executes a synchronous computation `func` asynchronously on a new fiber.
    ///
    /// This function will convert the synchronous `func` into an asynchronous
    /// future. This is done by running `func` in a fiber on a separate native
    /// stack which can be suspended and resumed from.
    ///
    /// Most of the nitty-gritty here is how we juggle the various contexts
    /// necessary to suspend the fiber later on and poll sub-futures. It's hoped
    /// that the various comments are illuminating as to what's going on here.
    #[cfg(feature = "async")]
    pub(crate) async fn on_fiber<R>(
        &mut self,
        func: impl FnOnce(&mut StoreContextMut<'_, T>) -> R + Send,
    ) -> Result<R>
    where
        T: Send,
    {
        let config = self.engine().config();
        debug_assert!(self.0.async_support());
        debug_assert!(config.async_stack_size > 0);

        // `slot` receives `func`'s result from inside the fiber; it must
        // outlive the fiber future below, hence the declaration out here.
        let mut slot = None;
        let future = {
            let current_poll_cx = self.0.async_state.current_poll_cx.get();
            let current_suspend = self.0.async_state.current_suspend.get();
            let stack = self.engine().allocator().allocate_fiber_stack()?;

            let engine = self.engine().clone();
            let slot = &mut slot;
            let fiber = wasmtime_fiber::Fiber::new(stack, move |keep_going, suspend| {
                // First check and see if we were interrupted/dropped, and only
                // continue if we haven't been.
                keep_going?;

                // Configure our store's suspension context for the rest of the
                // execution of this fiber. Note that a raw pointer is stored here
                // which is only valid for the duration of this closure.
                // Consequently we at least replace it with the previous value when
                // we're done. This reset is also required for correctness because
                // otherwise our value will overwrite another active fiber's value.
                // There should be a test that segfaults in `async_functions.rs` if
                // this `Replace` is removed.
                unsafe {
                    let _reset = Reset(current_suspend, *current_suspend);
                    *current_suspend = suspend;

                    *slot = Some(func(self));
                    Ok(())
                }
            })?;

            // Once we have the fiber representing our synchronous computation, we
            // wrap that in a custom future implementation which does the
            // translation from the future protocol to our fiber API.
            FiberFuture {
                fiber: Some(fiber),
                current_poll_cx,
                engine,
                state: Some(crate::runtime::vm::AsyncWasmCallState::new()),
            }
        };
        future.await?;

        return Ok(slot.unwrap());

        /// Adapter future which drives the fiber above to completion,
        /// translating between the `Future` polling protocol and the fiber
        /// resume/suspend API.
        struct FiberFuture<'a> {
            fiber: Option<wasmtime_fiber::Fiber<'a, Result<()>, (), Result<()>>>,
            current_poll_cx: *mut *mut Context<'static>,
            engine: Engine,
            // See comments in `FiberFuture::resume` for this
            state: Option<crate::runtime::vm::AsyncWasmCallState>,
        }

        // This is surely the most dangerous `unsafe impl Send` in the entire
        // crate. There are two members in `FiberFuture` which cause it to not
        // be `Send`. One is `current_poll_cx` and is entirely uninteresting.
        // This is just used to manage `Context` pointers across `await` points
        // in the future, and requires raw pointers to get it to happen easily.
        // Nothing too weird about the `Send`-ness, values aren't actually
        // crossing threads.
        //
        // The really interesting piece is `fiber`. Now the "fiber" here is
        // actual honest-to-god Rust code which we're moving around. What we're
        // doing is the equivalent of moving our thread's stack to another OS
        // thread. Turns out we, in general, have no idea what's on the stack
        // and would generally have no way to verify that this is actually safe
        // to do!
        //
        // Thankfully, though, Wasmtime has the power. Without being glib it's
        // actually worth examining what's on the stack. It's unfortunately not
        // super-local to this function itself. Our closure to `Fiber::new` runs
        // `func`, which is given to us from the outside. Thankfully, though, we
        // have tight control over this. Usage of `on_fiber` is typically done
        // *just* before entering WebAssembly itself, so we'll have a few stack
        // frames of Rust code (all in Wasmtime itself) before we enter wasm.
        //
        // Once we've entered wasm, well then we have a whole bunch of wasm
        // frames on the stack. We've got this nifty thing called Cranelift,
        // though, which allows us to also have complete control over everything
        // on the stack!
        //
        // Finally, when wasm switches back to the fiber's starting pointer
        // (this future we're returning) then it means wasm has reentered Rust.
        // Suspension can only happen via the `block_on` function of an
        // `AsyncCx`. This, conveniently, also happens entirely in Wasmtime
        // controlled code!
        //
        // There's an extremely important point that should be called out here.
        // User-provided futures **are not on the stack** during suspension
        // points. This is extremely crucial because we in general cannot reason
        // about Send/Sync for stack-local variables since rustc doesn't analyze
        // them at all. With our construction, though, we are guaranteed that
        // Wasmtime owns all stack frames between the stack of a fiber and when
        // the fiber suspends (and it could move across threads). At this time
        // the only user-provided piece of data on the stack is the future
        // itself given to us. Lo-and-behold as you might notice the future is
        // required to be `Send`!
        //
        // What this all boils down to is that we, as the authors of Wasmtime,
        // need to be extremely careful that on the async fiber stack we only
        // store Send things. For example we can't start using `Rc` willy nilly
        // by accident and leave a copy in TLS somewhere. (similarly we have to
        // be ready for TLS to change while we're executing wasm code between
        // suspension points).
        //
        // While somewhat onerous it shouldn't be too too hard (the TLS bit is
        // the hardest bit so far). This does mean, though, that no user should
        // ever have to worry about the `Send`-ness of Wasmtime. If rustc says
        // it's ok, then it's ok.
        //
        // With all that in mind we unsafely assert here that wasmtime is
        // correct. We declare the fiber as only containing Send data on its
        // stack, despite not knowing for sure at compile time that this is
        // correct. That's what `unsafe` in Rust is all about, though, right?
        unsafe impl Send for FiberFuture<'_> {}

        impl FiberFuture<'_> {
            /// Shared accessor for the fiber; `fiber` is only `None`
            /// transiently during `drop`.
            fn fiber(&self) -> &wasmtime_fiber::Fiber<'_, Result<()>, (), Result<()>> {
                self.fiber.as_ref().unwrap()
            }

            /// This is a helper function to call `resume` on the underlying
            /// fiber while correctly managing Wasmtime's thread-local data.
            ///
            /// Wasmtime's implementation of traps leverages thread-local data
            /// to get access to metadata during a signal. This thread-local
            /// data is a linked list of "activations" where the nodes of the
            /// linked list are stored on the stack. It would be invalid as a
            /// result to suspend a computation with the head of the linked list
            /// on this stack then move the stack to another thread and resume
            /// it. That means that a different thread would point to our stack
            /// and our thread doesn't point to our stack at all!
            ///
            /// Basically management of TLS is required here one way or another.
            /// The strategy currently settled on is to manage the list of
            /// activations created by this fiber as a unit. When a fiber
            /// resumes the linked list is prepended to the current thread's
            /// list. When the fiber is suspended then the fiber's list of
            /// activations are all removed en-masse and saved within the fiber.
            fn resume(&mut self, val: Result<()>) -> Result<Result<()>, ()> {
                unsafe {
                    let prev = self.state.take().unwrap().push();
                    let restore = Restore {
                        fiber: self,
                        state: Some(prev),
                    };
                    return restore.fiber.fiber().resume(val);
                }

                /// Drop guard that pops this fiber's activations off the
                /// current thread's TLS list even if `resume` unwinds.
                struct Restore<'a, 'b> {
                    fiber: &'a mut FiberFuture<'b>,
                    state: Option<crate::runtime::vm::PreviousAsyncWasmCallState>,
                }

                impl Drop for Restore<'_, '_> {
                    fn drop(&mut self) {
                        unsafe {
                            self.fiber.state = Some(self.state.take().unwrap().restore());
                        }
                    }
                }
            }
        }

        impl Future for FiberFuture<'_> {
            type Output = Result<()>;

            fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
                // We need to carry over this `cx` into our fiber's runtime
                // for when it tries to poll sub-futures that are created. Doing
                // this must be done unsafely, however, since `cx` is only alive
                // for this one singular function call. Here we do a `transmute`
                // to extend the lifetime of `Context` so it can be stored in
                // our `Store`, and then we replace the current polling context
                // with this one.
                //
                // Note that the replace is done for weird situations where
                // futures might be switching contexts and there's multiple
                // wasmtime futures in a chain of futures.
                //
                // On exit from this function, though, we reset the polling
                // context back to what it was to signify that `Store` no longer
                // has access to this pointer.
                unsafe {
                    let _reset = Reset(self.current_poll_cx, *self.current_poll_cx);
                    *self.current_poll_cx =
                        core::mem::transmute::<&mut Context<'_>, *mut Context<'static>>(cx);

                    // After that's set up we resume execution of the fiber, which
                    // may also start the fiber for the first time. This either
                    // returns `Ok` saying the fiber finished (yay!) or it
                    // returns `Err` with the payload passed to `suspend`, which
                    // in our case is `()`.
                    match self.resume(Ok(())) {
                        Ok(result) => Poll::Ready(result),

                        // If `Err` is returned that means the fiber polled a
                        // future but it said "Pending", so we propagate that
                        // here.
                        //
                        // An additional safety check is performed when leaving
                        // this function to help bolster the guarantees of
                        // `unsafe impl Send` above. Notably this future may get
                        // re-polled on a different thread. Wasmtime's
                        // thread-local state points to the stack, however,
                        // meaning that it would be incorrect to leave a pointer
                        // in TLS when this function returns. This function
                        // performs a runtime assert to verify that this is the
                        // case, notably that the one TLS pointer Wasmtime uses
                        // is not pointing anywhere within the stack. If it is
                        // then that's a bug indicating that TLS management in
                        // Wasmtime is incorrect.
                        Err(()) => {
                            if let Some(range) = self.fiber().stack().range() {
                                crate::runtime::vm::AsyncWasmCallState::assert_current_state_not_in_range(range);
                            }
                            Poll::Pending
                        }
                    }
                }
            }
        }

        // Dropping futures is pretty special in that it means the future has
        // been requested to be cancelled. Here we run the risk of dropping an
        // in-progress fiber, and if we were to do nothing then the fiber would
        // leak all its owned stack resources.
        //
        // To handle this we implement `Drop` here and, if the fiber isn't done,
        // resume execution of the fiber saying "hey please stop you're
        // interrupted". Our `Trap` created here (which has the stack trace
        // of whomever dropped us) will then get propagated in whatever called
        // `block_on`, and the idea is that the trap propagates all the way back
        // up to the original fiber start, finishing execution.
        //
        // We don't actually care about the fiber's return value here (no one's
        // around to look at it), we just assert the fiber finished to
        // completion.
        impl Drop for FiberFuture<'_> {
            fn drop(&mut self) {
                if !self.fiber().done() {
                    let result = self.resume(Err(anyhow!("future dropped")));
                    // This resumption with an error should always complete the
                    // fiber. While it's technically possible for host code to catch
                    // the trap and re-resume, we'd ideally like to signal that to
                    // callers that they shouldn't be doing that.
                    debug_assert!(result.is_ok());
                }

                self.state.take().unwrap().assert_null();

                unsafe {
                    self.engine
                        .allocator()
                        .deallocate_fiber_stack(self.fiber.take().unwrap().into_stack());
                }
            }
        }
    }
}
2341
/// A handle to a store's async execution context, captured while running on a
/// fiber, which allows `block_on`-ing futures from otherwise-synchronous host
/// code.
#[cfg(feature = "async")]
pub struct AsyncCx {
    // Pointer to the store's slot recording the current fiber suspension
    // handle; read (and temporarily nulled) by `block_on`.
    current_suspend: *mut *mut wasmtime_fiber::Suspend<Result<()>, (), Result<()>>,
    // Pointer to the store's slot holding the `Context` of whoever is
    // currently polling the outermost fiber future.
    current_poll_cx: *mut *mut Context<'static>,
    // Whether MPK protection masks must be saved/restored across suspensions.
    track_pkey_context_switch: bool,
}
2348
#[cfg(feature = "async")]
impl AsyncCx {
    /// Blocks on the asynchronous computation represented by `future` and
    /// produces the result here, in-line.
    ///
    /// This function is designed to only work when it's currently executing on
    /// a native fiber. This fiber provides the ability for us to handle the
    /// future's `Pending` state as "jump back to whomever called the fiber in
    /// an asynchronous fashion and propagate `Pending`". This tight coupling
    /// with `on_fiber` below is what powers the asynchronicity of calling wasm.
    /// Note that the asynchronous part only applies to host functions, wasm
    /// itself never really does anything asynchronous at this time.
    ///
    /// This function takes a `future` and will (appear to) synchronously wait
    /// on the result. While this function is executing it will fiber switch
    /// to-and-from the original frame calling `on_fiber` which should be a
    /// guarantee due to how async stores are configured.
    ///
    /// The return value here is either the output of the future `T`, or a trap
    /// which represents that the asynchronous computation was cancelled. It is
    /// not recommended to catch the trap and try to keep executing wasm, so
    /// we've tried to liberally document this.
    ///
    /// # Safety
    ///
    /// The raw pointers held in this `AsyncCx` must still be valid: this must
    /// be called on the same fiber, during the same execution, from which the
    /// context was captured.
    pub unsafe fn block_on<U>(
        &self,
        mut future: Pin<&mut (dyn Future<Output = U> + Send)>,
    ) -> Result<U> {
        // Take our current `Suspend` context which was configured as soon as
        // our fiber started. Note that we must load it at the front here and
        // save it on our stack frame. While we're polling the future other
        // fibers may be started for recursive computations, and the current
        // suspend context is only preserved at the edges of the fiber, not
        // during the fiber itself.
        //
        // For a little bit of extra safety we also replace the current value
        // with null to try to catch any accidental bugs on our part early.
        // This is all pretty unsafe so we're trying to be careful...
        //
        // Note that there should be a segfaulting test in `async_functions.rs`
        // if this `Reset` is removed.
        let suspend = *self.current_suspend;
        let _reset = Reset(self.current_suspend, suspend);
        *self.current_suspend = ptr::null_mut();
        assert!(!suspend.is_null());

        loop {
            // Poll the future once with the current polling context; the
            // context slot is likewise nulled-and-restored around the poll.
            let future_result = {
                let poll_cx = *self.current_poll_cx;
                let _reset = Reset(self.current_poll_cx, poll_cx);
                *self.current_poll_cx = ptr::null_mut();
                assert!(!poll_cx.is_null());
                future.as_mut().poll(&mut *poll_cx)
            };

            match future_result {
                Poll::Ready(t) => break Ok(t),
                Poll::Pending => {}
            }

            // In order to prevent this fiber's MPK state from being munged by
            // other fibers while it is suspended, we save and restore it once
            // execution resumes. Note that when MPK is not supported,
            // these are noops.
            let previous_mask = if self.track_pkey_context_switch {
                let previous_mask = mpk::current_mask();
                mpk::allow(ProtectionMask::all());
                previous_mask
            } else {
                ProtectionMask::all()
            };
            // Suspend back to whoever is polling us; `?` propagates the trap
            // raised when the outer future was dropped while we were yielded.
            (*suspend).suspend(())?;
            if self.track_pkey_context_switch {
                mpk::allow(previous_mask);
            }
        }
    }
}
2425
2426unsafe impl<T> crate::runtime::vm::Store for StoreInner<T> {
    fn vmruntime_limits(&self) -> *mut VMRuntimeLimits {
        // Disambiguates between this trait method and the inherent
        // `StoreOpaque` method of the same name.
        <StoreOpaque>::vmruntime_limits(self)
    }
2430
    fn epoch_ptr(&self) -> *const AtomicU64 {
        // The epoch counter lives in the engine, shared by all of its stores.
        self.engine.epoch_counter() as *const _
    }
2434
    fn maybe_gc_store(&mut self) -> Option<&mut GcStore> {
        // `None` until/unless a GC heap has been allocated for this store.
        self.gc_store.as_mut()
    }
2438
    fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, anyhow::Error> {
        // Consult the configured resource limiter (if any) about whether this
        // linear-memory growth request should be allowed.
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).memory_growing(current, desired, maximum)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
                // Async limiters require executing on a fiber: block in place
                // on the limiter's future via the store's async context.
                self.inner
                    .async_cx()
                    .expect("ResourceLimiterAsync requires async Store")
                    .block_on(
                        limiter(&mut self.data)
                            .memory_growing(current, desired, maximum)
                            .as_mut(),
                    )?
            },
            // No limiter configured: growth is always permitted.
            None => Ok(true),
        }
    }
2463
    fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        // Report a failed memory growth to the configured limiter, which
        // decides whether to surface or swallow the error; with no limiter
        // the failure is logged and ignored.
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).memory_grow_failed(error)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
                limiter(&mut self.data).memory_grow_failed(error)
            }
            None => {
                log::debug!("ignoring memory growth failure error: {error:?}");
                Ok(())
            }
        }
    }
2479
    fn table_growing(
        &mut self,
        current: u32,
        desired: u32,
        maximum: Option<u32>,
    ) -> Result<bool, anyhow::Error> {
        // Need to borrow async_cx before the mut borrow of the limiter.
        // self.async_cx() panics when used with a non-async store, so
        // wrap this in an option.
        #[cfg(feature = "async")]
        let async_cx = if self.async_support()
            && matches!(self.limiter, Some(ResourceLimiterInner::Async(_)))
        {
            Some(self.async_cx().unwrap())
        } else {
            None
        };

        // Consult the configured resource limiter (if any) about whether this
        // table growth request should be allowed.
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).table_growing(current, desired, maximum)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
                async_cx
                    .expect("ResourceLimiterAsync requires async Store")
                    .block_on(
                        limiter(&mut self.data)
                            .table_growing(current, desired, maximum)
                            .as_mut(),
                    )?
            },
            // No limiter configured: growth is always permitted.
            None => Ok(true),
        }
    }
2515
2516 fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
2517 match self.limiter {
2518 Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
2519 limiter(&mut self.data).table_grow_failed(error)
2520 }
2521 #[cfg(feature = "async")]
2522 Some(ResourceLimiterInner::Async(ref mut limiter)) => {
2523 limiter(&mut self.data).table_grow_failed(error)
2524 }
2525 None => {
2526 log::debug!("ignoring table growth failure: {error:?}");
2527 Ok(())
2528 }
2529 }
2530 }
2531
2532 fn out_of_gas(&mut self) -> Result<()> {
2533 if !self.refuel() {
2534 return Err(Trap::OutOfFuel).err2anyhow();
2535 }
2536 #[cfg(feature = "async")]
2537 if self.fuel_yield_interval.is_some() {
2538 self.async_yield_impl()?;
2539 }
2540 Ok(())
2541 }
2542
    /// Invoked when the engine's epoch passes this store's deadline.
    ///
    /// With no configured deadline behavior this traps with
    /// `Trap::Interrupt`; otherwise the user callback decides whether
    /// execution continues (optionally yielding to the async executor
    /// first) and by how much the deadline is extended. On success returns
    /// the new absolute epoch deadline so the calling Wasm code doesn't
    /// have to reload it.
    fn new_epoch(&mut self) -> Result<u64, anyhow::Error> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        let delta_result = match &mut behavior {
            None => Err(Trap::Interrupt).err2anyhow(),
            Some(callback) => callback((&mut *self).as_context_mut()).and_then(|update| {
                let delta = match update {
                    UpdateDeadline::Continue(delta) => delta,

                    #[cfg(feature = "async")]
                    UpdateDeadline::Yield(delta) => {
                        assert!(
                            self.async_support(),
                            "cannot use `UpdateDeadline::Yield` without enabling async support in the config"
                        );
                        // Do the async yield. May return a trap if future was
                        // canceled while we're yielded.
                        self.async_yield_impl()?;
                        delta
                    }
                };

                // Set a new deadline and return the new epoch deadline so
                // the Wasm code doesn't have to reload it.
                self.set_epoch_deadline(delta);
                Ok(self.get_epoch_deadline())
            })
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        delta_result
    }
2577
    /// Performs a garbage collection of this store's GC heap, keeping
    /// `root` (if provided) alive across the collection and returning a
    /// fresh reference to it afterwards.
    #[cfg(feature = "gc")]
    fn gc(&mut self, root: Option<VMGcRef>) -> Result<Option<VMGcRef>> {
        let mut scope = RootScope::new(self);
        let store = scope.as_context_mut().0;
        let store_id = store.id();
        // Register `root` as a LIFO root so the collector treats it as live
        // for the duration of the collection.
        let root = root.map(|r| store.gc_roots_mut().push_lifo_root(store_id, r));

        if store.async_support() {
            #[cfg(feature = "async")]
            unsafe {
                // Async stores must drive the collection as a future on the
                // store's async context; `expect` fires if the context has
                // already been torn down.
                let async_cx = store.async_cx();
                let mut future = store.gc_async();
                async_cx
                    .expect("attempted to pull async context during shutdown")
                    .block_on(Pin::new_unchecked(&mut future))?;
            }
        } else {
            (**store).gc();
        }

        // Clone the (possibly relocated) reference back out of the root set
        // before the `RootScope` ends and pops the LIFO root.
        let root = match root {
            None => None,
            Some(r) => {
                let r = r
                    .get_gc_ref(store)
                    .expect("still in scope")
                    .unchecked_copy();
                Some(store.gc_store_mut()?.clone_gc_ref(&r))
            }
        };

        Ok(root)
    }
2611
    /// GC support is compiled out, so collection is a no-op: any provided
    /// root is handed straight back to the caller.
    #[cfg(not(feature = "gc"))]
    fn gc(&mut self, root: Option<VMGcRef>) -> Result<Option<VMGcRef>> {
        Ok(root)
    }
2616
    /// Gives mutable access to this store's component-model call state.
    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut crate::runtime::vm::component::CallContexts {
        &mut self.component_calls
    }
2621}
2622
impl<T> StoreInner<T> {
    /// Sets this store's epoch deadline to `delta` ticks past the engine's
    /// current epoch.
    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
        // Set a new deadline based on the "epoch deadline delta".
        //
        // Safety: this is safe because the epoch deadline in the
        // `VMRuntimeLimits` is accessed only here and by Wasm guest code
        // running in this store, and we have a `&mut self` here.
        //
        // Also, note that when this update is performed while Wasm is
        // on the stack, the Wasm will reload the new value once we
        // return into it.
        let epoch_deadline = unsafe { (*self.vmruntime_limits()).epoch_deadline.get_mut() };
        *epoch_deadline = self.engine().current_epoch() + delta;
    }

    /// Clears any configured deadline behavior so that reaching the epoch
    /// deadline traps (`new_epoch` returns `Trap::Interrupt` when the
    /// behavior is `None`).
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Installs `callback` to be invoked whenever the epoch deadline is
    /// reached; the callback decides whether and how execution continues.
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }

    /// Configures the store to yield to the async executor and then extend
    /// the deadline by `delta` each time the epoch deadline is reached.
    ///
    /// # Panics
    ///
    /// Panics if the store was not created with async support enabled.
    fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        assert!(
            self.async_support(),
            "cannot use `epoch_deadline_async_yield_and_update` without enabling async support in the config"
        );
        #[cfg(feature = "async")]
        {
            self.epoch_deadline_behavior =
                Some(Box::new(move |_store| Ok(UpdateDeadline::Yield(delta))));
        }
        let _ = delta; // suppress warning in non-async build
    }

    /// Reads the current absolute epoch deadline out of the runtime limits.
    fn get_epoch_deadline(&self) -> u64 {
        // Safety: this is safe because, as above, it is only invoked
        // from within `new_epoch` which is called from guest Wasm
        // code, which will have an exclusive borrow on the Store.
        let epoch_deadline = unsafe { (*self.vmruntime_limits()).epoch_deadline.get_mut() };
        *epoch_deadline
    }
}
2670
2671impl<T: Default> Default for Store<T> {
2672 fn default() -> Store<T> {
2673 Store::new(&Engine::default(), T::default())
2674 }
2675}
2676
2677impl<T: fmt::Debug> fmt::Debug for Store<T> {
2678 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2679 let inner = &**self.inner as *const StoreInner<T>;
2680 f.debug_struct("Store")
2681 .field("inner", &inner)
2682 .field("data", &self.inner.data)
2683 .finish()
2684 }
2685}
2686
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // for documentation on this `unsafe`, see `into_data`.
        //
        // Note the ordering: the user's `T` (`data`) is dropped first,
        // then the rest of the store's innards (`inner`). Both fields are
        // `ManuallyDrop`, so this destructor is the single place
        // responsible for running their destructors.
        unsafe {
            ManuallyDrop::drop(&mut self.inner.data);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2696
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            for instance in self.instances.iter_mut() {
                // Dummy instances are deallocated with a fresh on-demand
                // allocator rather than the engine's configured allocator —
                // presumably because that's how they were allocated; verify
                // against the allocation sites for `StoreInstanceKind::Dummy`.
                if let StoreInstanceKind::Dummy = instance.kind {
                    ondemand.deallocate_module(&mut instance.handle);
                } else {
                    allocator.deallocate_module(&mut instance.handle);
                }
            }
            ondemand.deallocate_module(&mut self.default_caller);

            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                if self.engine.features().gc_types() {
                    allocator.deallocate_gc_heap(gc_store.allocation_index, gc_store.gc_heap);
                } else {
                    // If GC types are not enabled, we are just dealing with a
                    // dummy GC heap.
                    debug_assert_eq!(gc_store.allocation_index, GcHeapAllocationIndex::default());
                    debug_assert!(gc_store.gc_heap.as_any().is::<crate::vm::DisabledGcHeap>());
                }
            }

            #[cfg(feature = "component-model")]
            {
                // Undo the per-instance bookkeeping recorded while component
                // instances were created in this store.
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }

            // See documentation for these fields on `StoreOpaque` for why they
            // must be dropped in this order.
            ManuallyDrop::drop(&mut self.store_data);
            ManuallyDrop::drop(&mut self.rooted_host_funcs);
        }
    }
}
2740
impl crate::runtime::vm::ModuleInfoLookup for ModuleRegistry {
    /// Resolves the module whose compiled code contains the program counter
    /// `pc`, delegating to the registry's `lookup_module_info`.
    fn lookup(&self, pc: usize) -> Option<&dyn crate::runtime::vm::ModuleInfo> {
        self.lookup_module_info(pc)
    }
}
2746
/// RAII guard that writes the saved value (field `.1`) back through the raw
/// pointer (field `.0`) when dropped, restoring a previous value even on
/// early return or unwind.
struct Reset<T: Copy>(*mut T, T);

impl<T: Copy> Drop for Reset<T> {
    fn drop(&mut self) {
        // SAFETY: the creator of this guard is responsible for passing a
        // pointer that remains valid for writes for the guard's lifetime.
        // `T: Copy` means no destructor needs to run for the overwritten
        // value, so a raw `write` matches the plain assignment semantics.
        unsafe {
            self.0.write(self.1);
        }
    }
}
2756
#[cfg(test)]
mod tests {
    //! Unit tests for the free-function fuel-accounting helpers
    //! (`get_fuel`, `refuel`, `set_fuel`) defined earlier in this file.

    use super::{get_fuel, refuel, set_fuel};
    use std::num::NonZeroU64;

    /// Minimal stand-in for a store's fuel state: a (possibly negative)
    /// consumed counter, a reserve, and an optional async yield interval.
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        // Total remaining fuel across the active counter and the reserve.
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        // Moves fuel from the reserve into the active counter; returns
        // whether any fuel remained to keep running.
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    /// Without a yield interval all fuel goes into the active counter;
    /// with one, only the interval is made active and the rest is reserved.
    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    /// Values near and above `i64::MAX` must round-trip through
    /// `set_fuel`/`get_fuel` without losing units to the split
    /// representation.
    #[test]
    fn does_not_lose_precision() {
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }

    /// Same precision guarantee when a yield interval caps how much fuel
    /// can be active at once, including intervals that exceed `i64::MAX`.
    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // It's possible for fuel to have been consumed over the limit, as
        // some instructions can consume multiple units of fuel at once.
        // Refueling should be strict in its accounting and not add more
        // fuel than there is.
        let mut tank = FuelTank::new();

        // Over-consumption (positive `consumed_fuel`) is charged against
        // the reserve before the next interval is made active.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        // Total fuel is preserved across a refuel.
        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        // When the reserve can't cover what was over-consumed, refueling
        // fails and leaves the tank untouched.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }
}
2877}