wasmtime/
config.rs

1use crate::prelude::*;
2use alloc::sync::Arc;
3use bitflags::Flags;
4use core::fmt;
5use core::str::FromStr;
6use hashbrown::{HashMap, HashSet};
7use serde_derive::{Deserialize, Serialize};
8#[cfg(any(feature = "cache", feature = "cranelift", feature = "winch"))]
9use std::path::Path;
10use target_lexicon::Architecture;
11use wasmparser::WasmFeatures;
12#[cfg(feature = "cache")]
13use wasmtime_cache::CacheConfig;
14use wasmtime_environ::Tunables;
15
16#[cfg(feature = "runtime")]
17use crate::memory::MemoryCreator;
18#[cfg(feature = "runtime")]
19use crate::profiling_agent::{self, ProfilingAgent};
20#[cfg(feature = "runtime")]
21use crate::runtime::vm::{
22    GcRuntime, InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator,
23};
24#[cfg(feature = "runtime")]
25use crate::trampoline::MemoryCreatorProxy;
26
27#[cfg(feature = "async")]
28use crate::stack::{StackCreator, StackCreatorProxy};
29#[cfg(feature = "async")]
30use wasmtime_fiber::RuntimeFiberStackCreator;
31
32#[cfg(feature = "pooling-allocator")]
33pub use crate::runtime::vm::MpkEnabled;
34#[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
35pub use wasmtime_environ::CacheStore;
36
/// Represents the module instance allocation strategy to use.
#[derive(Clone)]
pub enum InstanceAllocationStrategy {
    /// The on-demand instance allocation strategy.
    ///
    /// Resources related to a module instance are allocated at instantiation time and
    /// immediately deallocated when the `Store` referencing the instance is dropped.
    ///
    /// This is the default allocation strategy for Wasmtime.
    OnDemand,
    /// The pooling instance allocation strategy.
    ///
    /// A pool of resources is created in advance and module instantiation reuses resources
    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
    /// is dropped.
    ///
    /// The carried [`PoolingAllocationConfig`] configures the pool itself.
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
55
56impl InstanceAllocationStrategy {
57    /// The default pooling instance allocation strategy.
58    #[cfg(feature = "pooling-allocator")]
59    pub fn pooling() -> Self {
60        Self::Pooling(Default::default())
61    }
62}
63
64impl Default for InstanceAllocationStrategy {
65    fn default() -> Self {
66        Self::OnDemand
67    }
68}
69
#[derive(Clone)]
/// Configure the strategy used for versioning in serializing and deserializing [`crate::Module`].
///
/// The default is [`ModuleVersionStrategy::WasmtimeVersion`].
pub enum ModuleVersionStrategy {
    /// Use the wasmtime crate's Cargo package version.
    WasmtimeVersion,
    /// Use a custom version string. Must be at most 255 bytes.
    Custom(String),
    /// Emit no version string in serialization, and accept all version strings in deserialization.
    None,
}
80
81impl Default for ModuleVersionStrategy {
82    fn default() -> Self {
83        ModuleVersionStrategy::WasmtimeVersion
84    }
85}
86
87impl core::hash::Hash for ModuleVersionStrategy {
88    fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
89        match self {
90            Self::WasmtimeVersion => env!("CARGO_PKG_VERSION").hash(hasher),
91            Self::Custom(s) => s.hash(hasher),
92            Self::None => {}
93        };
94    }
95}
96
/// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`](crate::Engine::new).
///
/// The validation of `Config` is deferred until the engine is being built, thus
/// a problematic config may cause `Engine::new` to fail.
#[derive(Clone)]
pub struct Config {
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    compiler_config: CompilerConfig,
    profiling_strategy: ProfilingStrategy,
    // Accumulated tunable overrides; `None` fields mean "use the default".
    tunables: ConfigTunables,

    #[cfg(feature = "cache")]
    pub(crate) cache_config: CacheConfig,
    // Custom linear-memory creator, if the embedder installed one.
    #[cfg(feature = "runtime")]
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    // Maximum stack space (bytes) wasm execution may consume; see `max_wasm_stack`.
    pub(crate) max_wasm_stack: usize,
    /// Explicitly enabled features via `Config::wasm_*` methods. This is a
    /// signal that the embedder specifically wants something turned on
    /// regardless of the defaults that Wasmtime might otherwise have enabled.
    ///
    /// Note that this, and `disabled_features` below, start as the empty set of
    /// features to only track explicit user requests.
    pub(crate) enabled_features: WasmFeatures,
    /// Same as `enabled_features`, but for those that are explicitly disabled.
    pub(crate) disabled_features: WasmFeatures,
    pub(crate) wasm_backtrace: bool,
    // Whether `wasm_backtrace_details` was configured from the environment.
    pub(crate) wasm_backtrace_details_env_used: bool,
    pub(crate) native_unwind_info: Option<bool>,
    #[cfg(feature = "async")]
    pub(crate) async_stack_size: usize,
    #[cfg(feature = "async")]
    pub(crate) stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
    pub(crate) async_support: bool,
    pub(crate) module_version: ModuleVersionStrategy,
    pub(crate) parallel_compilation: bool,
    pub(crate) memory_init_cow: bool,
    pub(crate) memory_guaranteed_dense_image_size: u64,
    pub(crate) force_memory_init_memfd: bool,
    pub(crate) wmemcheck: bool,
    pub(crate) coredump_on_trap: bool,
    pub(crate) macos_use_mach_ports: bool,
    // Host CPU feature probe: maps a feature name to Some(enabled), or None
    // when the feature is unknown. Only available with `std` (see `Config::new`).
    pub(crate) detect_host_feature: Option<fn(&str) -> Option<bool>>,
}
145
/// Tunable knobs accumulated by `Config` setter methods.
///
/// Every field is `None` until the embedder explicitly configures it through
/// the corresponding `Config` method, so `None` means "use Wasmtime's
/// default". NOTE(review): these presumably feed into
/// `wasmtime_environ::Tunables` when the engine is built — confirm against
/// the engine construction code.
#[derive(Default, Clone)]
struct ConfigTunables {
    static_memory_reservation: Option<u64>,
    static_memory_offset_guard_size: Option<u64>,
    dynamic_memory_offset_guard_size: Option<u64>,
    dynamic_memory_growth_reserve: Option<u64>,
    generate_native_debuginfo: Option<bool>,
    parse_wasm_debuginfo: Option<bool>,
    consume_fuel: Option<bool>,
    epoch_interruption: Option<bool>,
    static_memory_bound_is_maximum: Option<bool>,
    guard_before_linear_memory: Option<bool>,
    table_lazy_init: Option<bool>,
    generate_address_map: Option<bool>,
    debug_adapter_modules: Option<bool>,
    relaxed_simd_deterministic: Option<bool>,
}
163
/// User-provided configuration for the compiler.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[derive(Debug, Clone)]
struct CompilerConfig {
    // Selected compilation strategy; `None` presumably means "auto-select"
    // (see `CompilerConfig::new`, which stores `Strategy::Auto.not_auto()`).
    strategy: Option<Strategy>,
    // Target triple to compile for; `None` means the host triple
    // (see `Config::target`).
    target: Option<target_lexicon::Triple>,
    // Keyed compiler settings; consulted/claimed via
    // `ensure_setting_unset_or_given`.
    settings: HashMap<String, String>,
    // Boolean compiler flags, enabled by presence in the set.
    flags: HashSet<String>,
    // Storage backend for Cranelift's incremental-compilation cache
    // (see `Config::enable_incremental_compilation`).
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    cache_store: Option<Arc<dyn CacheStore>>,
    // Directory for emitting CLIF output — presumably a debugging aid;
    // TODO confirm against where it is read.
    clif_dir: Option<std::path::PathBuf>,
    wmemcheck: bool,
}
177
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl CompilerConfig {
    /// Creates a compiler configuration with everything unset / empty.
    fn new() -> Self {
        CompilerConfig {
            strategy: Strategy::Auto.not_auto(),
            target: None,
            settings: HashMap::default(),
            flags: HashSet::default(),
            #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
            cache_store: None,
            clif_dir: None,
            wmemcheck: false,
        }
    }

    /// Ensures that the key is not set or equals to the given value.
    /// If the key is not set, it will be set to the given value.
    ///
    /// # Returns
    ///
    /// Returns true if successfully set or already had the given setting
    /// value, or false if the setting was explicitly set to something
    /// else previously.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        // A previously-recorded setting wins: report whether it agrees with `v`.
        if let Some(prev) = self.settings.get(k) {
            return prev == v;
        }
        // Not set yet, so claim it with the requested value.
        self.settings.insert(k.to_string(), v.to_string());
        true
    }
}
212
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl Default for CompilerConfig {
    /// Equivalent to [`CompilerConfig::new`].
    fn default() -> Self {
        CompilerConfig::new()
    }
}
219
220impl Config {
    /// Creates a new configuration object with the default configuration
    /// specified.
    pub fn new() -> Self {
        let mut ret = Self {
            tunables: ConfigTunables::default(),
            #[cfg(any(feature = "cranelift", feature = "winch"))]
            compiler_config: CompilerConfig::default(),
            #[cfg(feature = "cache")]
            cache_config: CacheConfig::new_cache_disabled(),
            profiling_strategy: ProfilingStrategy::None,
            #[cfg(feature = "runtime")]
            mem_creator: None,
            allocation_strategy: InstanceAllocationStrategy::OnDemand,
            // 512k of stack -- note that this is chosen currently to not be too
            // big, not be too small, and be a good default for most platforms.
            // One platform of particular note is Windows where the stack size
            // of the main thread seems to, by default, be smaller than that of
            // Linux and macOS. This 512k value at least lets our current test
            // suite pass on the main thread of Windows (using `--test-threads
            // 1` forces this), or at least it passed when this change was
            // committed.
            max_wasm_stack: 512 * 1024,
            wasm_backtrace: true,
            wasm_backtrace_details_env_used: false,
            native_unwind_info: None,
            enabled_features: WasmFeatures::empty(),
            disabled_features: WasmFeatures::empty(),
            #[cfg(feature = "async")]
            // 2 MiB default async stack; see `Config::async_stack_size`.
            async_stack_size: 2 << 20,
            #[cfg(feature = "async")]
            stack_creator: None,
            async_support: false,
            module_version: ModuleVersionStrategy::default(),
            // Parallel compilation is disabled under Miri.
            parallel_compilation: !cfg!(miri),
            memory_init_cow: true,
            memory_guaranteed_dense_image_size: 16 << 20,
            force_memory_init_memfd: false,
            wmemcheck: false,
            coredump_on_trap: false,
            // Mach-port handling is likewise disabled under Miri.
            macos_use_mach_ports: !cfg!(miri),
            // Runtime host-feature detection requires `std`.
            #[cfg(feature = "std")]
            detect_host_feature: Some(detect_host_feature),
            #[cfg(not(feature = "std"))]
            detect_host_feature: None,
        };
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            ret.cranelift_debug_verifier(false);
            ret.cranelift_opt_level(OptLevel::Speed);
        }

        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);

        ret
    }
276
277    /// Sets the target triple for the [`Config`].
278    ///
279    /// By default, the host target triple is used for the [`Config`].
280    ///
281    /// This method can be used to change the target triple.
282    ///
283    /// Cranelift flags will not be inferred for the given target and any
284    /// existing target-specific Cranelift flags will be cleared.
285    ///
286    /// # Errors
287    ///
288    /// This method will error if the given target triple is not supported.
289    #[cfg(any(feature = "cranelift", feature = "winch"))]
290    pub fn target(&mut self, target: &str) -> Result<&mut Self> {
291        self.compiler_config.target =
292            Some(target_lexicon::Triple::from_str(target).map_err(|e| anyhow::anyhow!(e))?);
293
294        Ok(self)
295    }
296
    /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
    /// backend for storage.
    ///
    /// Note that this currently always returns `Ok`; the `Result` return type
    /// leaves room for future validation of the store.
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    pub fn enable_incremental_compilation(
        &mut self,
        cache_store: Arc<dyn CacheStore>,
    ) -> Result<&mut Self> {
        self.compiler_config.cache_store = Some(cache_store);
        Ok(self)
    }
307
    /// Whether or not to enable support for asynchronous functions in Wasmtime.
    ///
    /// When enabled, the config can optionally define host functions with `async`.
    /// Instances created and functions called with this `Config` *must* be called
    /// through their asynchronous APIs, however. For example using
    /// [`Func::call`](crate::Func::call) will panic when used with this config.
    ///
    /// # Asynchronous Wasm
    ///
    /// WebAssembly does not currently have a way to specify at the bytecode
    /// level what is and isn't async. Host-defined functions, however, may be
    /// defined as `async`. WebAssembly imports always appear synchronous, which
    /// gives rise to a bit of an impedance mismatch here. To solve this
    /// Wasmtime supports "asynchronous configs" which enables calling these
    /// asynchronous functions in a way that looks synchronous to the executing
    /// WebAssembly code.
    ///
    /// An asynchronous config must always invoke wasm code asynchronously,
    /// meaning we'll always represent its computation as a
    /// [`Future`](std::future::Future). The `poll` method of the futures
    /// returned by Wasmtime will perform the actual work of calling the
    /// WebAssembly. Wasmtime won't manage its own thread pools or similar,
    /// that's left up to the embedder.
    ///
    /// To implement futures in a way that WebAssembly sees asynchronous host
    /// functions as synchronous, all async Wasmtime futures will execute on a
    /// separately allocated native stack from the thread otherwise executing
    /// Wasmtime. This separate native stack can then be switched to and from.
    /// Using this whenever an `async` host function returns a future that
    /// resolves to `Pending` we switch away from the temporary stack back to
    /// the main stack and propagate the `Pending` status.
    ///
    /// In general it's encouraged that the integration with `async` and
    /// wasmtime is designed early on in your embedding of Wasmtime to ensure
    /// that it's planned that WebAssembly executes in the right context of your
    /// application.
    ///
    /// # Execution in `poll`
    ///
    /// The [`Future::poll`](std::future::Future::poll) method is the main
    /// driving force behind Rust's futures. That method's own documentation
    /// states "an implementation of `poll` should strive to return quickly, and
    /// should not block". This, however, can be at odds with executing
    /// WebAssembly code as part of the `poll` method itself. If your
    /// WebAssembly is untrusted then this could allow the `poll` method to take
    /// arbitrarily long in the worst case, likely blocking all other
    /// asynchronous tasks.
    ///
    /// To remedy this situation you have a few possible ways to solve this:
    ///
    /// * The most efficient solution is to enable
    ///   [`Config::epoch_interruption`] in conjunction with
    ///   [`crate::Store::epoch_deadline_async_yield_and_update`]. Coupled with
    ///   periodic calls to [`crate::Engine::increment_epoch`] this will cause
    ///   executing WebAssembly to periodically yield back according to the
    ///   epoch configuration settings. This enables `Future::poll` to take at
    ///   most a certain amount of time according to epoch configuration
    ///   settings and when increments happen. The benefit of this approach is
    ///   that the instrumentation in compiled code is quite lightweight, but a
    ///   downside can be that the scheduling is somewhat nondeterministic since
    ///   increments are usually timer-based which are not always deterministic.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with epochs should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// * Alternatively you can enable the
    ///   [`Config::consume_fuel`](crate::Config::consume_fuel) method as well
    ///   as [`crate::Store::fuel_async_yield_interval`]. When doing so this will
    ///   configure Wasmtime futures to yield periodically while they're
    ///   executing WebAssembly code. After consuming the specified amount of
    ///   fuel wasm futures will return `Poll::Pending` from their `poll`
    ///   method, and will get automatically re-polled later. This enables the
    ///   `Future::poll` method to take roughly a fixed amount of time since
    ///   fuel is guaranteed to get consumed while wasm is executing. Unlike
    ///   epoch-based preemption this is deterministic since wasm always
    ///   consumes a fixed amount of fuel per-operation. The downside of this
    ///   approach, however, is that the compiled code instrumentation is
    ///   significantly more expensive than epoch checks.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with fuel should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// In all cases special care needs to be taken when integrating
    /// asynchronous wasm into your application. You should carefully plan where
    /// WebAssembly will execute and what compute resources will be allotted to
    /// it. If Wasmtime doesn't support exactly what you'd like just yet, please
    /// feel free to open an issue!
    #[cfg(feature = "async")]
    pub fn async_support(&mut self, enable: bool) -> &mut Self {
        self.async_support = enable;
        self
    }
404
    /// Configures whether DWARF debug information will be emitted during
    /// compilation.
    ///
    /// Note that the `debug-builtins` compile-time Cargo feature must also be
    /// enabled for native debuggers such as GDB or LLDB to be able to debug
    /// guest WebAssembly programs.
    ///
    /// By default this option is `false`.
    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
        // Recorded as a tunable so it takes effect when the engine is built.
        self.tunables.generate_native_debuginfo = Some(enable);
        self
    }
417
    /// Configures whether [`WasmBacktrace`] will be present in the context of
    /// errors returned from Wasmtime.
    ///
    /// A backtrace may be collected whenever an error is returned from a host
    /// function call through to WebAssembly or when WebAssembly itself hits a
    /// trap condition, such as an out-of-bounds memory access. This flag
    /// indicates, in these conditions, whether the backtrace is collected or
    /// not.
    ///
    /// Currently wasm backtraces are implemented through frame pointer walking.
    /// This means that collecting a backtrace is expected to be a fast and
    /// relatively cheap operation. Additionally backtrace collection is
    /// suitable in concurrent environments since one thread capturing a
    /// backtrace won't block other threads.
    ///
    /// Collected backtraces are attached via [`anyhow::Error::context`] to
    /// errors returned from host functions. The [`WasmBacktrace`] type can be
    /// acquired via [`anyhow::Error::downcast_ref`] to inspect the backtrace.
    /// When this option is disabled then this context is never applied to
    /// errors coming out of wasm.
    ///
    /// Note that this only controls backtrace collection; emission of native
    /// unwind information is configured separately via
    /// [`Config::native_unwind_info`].
    ///
    /// This option is `true` by default.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
        self.wasm_backtrace = enable;
        self
    }
446
447    /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
448    /// have filename/line number information.
449    ///
450    /// When enabled this will causes modules to retain debugging information
451    /// found in wasm binaries. This debug information will be used when a trap
452    /// happens to symbolicate each stack frame and attempt to print a
453    /// filename/line number for each wasm frame in the stack trace.
454    ///
455    /// By default this option is `WasmBacktraceDetails::Environment`, meaning
456    /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether
457    /// details should be parsed. Note that the `std` feature of this crate must
458    /// be active to read environment variables, otherwise this is disabled by
459    /// default.
460    pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
461        self.wasm_backtrace_details_env_used = false;
462        self.tunables.parse_wasm_debuginfo = match enable {
463            WasmBacktraceDetails::Enable => Some(true),
464            WasmBacktraceDetails::Disable => Some(false),
465            WasmBacktraceDetails::Environment => {
466                self.wasm_backtrace_details_env_used = true;
467                #[cfg(feature = "std")]
468                {
469                    std::env::var("WASMTIME_BACKTRACE_DETAILS")
470                        .map(|s| Some(s == "1"))
471                        .unwrap_or(Some(false))
472                }
473                #[cfg(not(feature = "std"))]
474                {
475                    Some(false)
476                }
477            }
478        };
479        self
480    }
481
    /// Configures whether to generate native unwind information
    /// (e.g. `.eh_frame` on Linux).
    ///
    /// This configuration option only exists to help third-party stack
    /// capturing mechanisms, such as the system's unwinder or the `backtrace`
    /// crate, determine how to unwind through Wasm frames. It does not affect
    /// whether Wasmtime can capture Wasm backtraces or not. The presence of
    /// [`WasmBacktrace`] is controlled by the [`Config::wasm_backtrace`]
    /// option.
    ///
    /// Native unwind information is included:
    /// - When targeting Windows, since the Windows ABI requires it.
    /// - By default.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
        // `None` (the initial state) means "use Wasmtime's default";
        // any explicit call here pins the behavior.
        self.native_unwind_info = Some(enable);
        self
    }
501
    /// Configures whether execution of WebAssembly will "consume fuel" to
    /// either halt or yield execution as desired.
    ///
    /// This can be used to deterministically prevent infinitely-executing
    /// WebAssembly code by instrumenting generated code to consume fuel as it
    /// executes. When fuel runs out a trap is raised, however [`Store`] can be
    /// configured to yield execution periodically via
    /// [`crate::Store::fuel_async_yield_interval`].
    ///
    /// Note that a [`Store`] starts with no fuel, so if you enable this option
    /// you'll have to be sure to pour some fuel into [`Store`] before
    /// executing some code.
    ///
    /// By default this option is `false`.
    ///
    /// [`Store`]: crate::Store
    pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
        // Recorded as a tunable so it takes effect when the engine is built.
        self.tunables.consume_fuel = Some(enable);
        self
    }
522
    /// Enables epoch-based interruption.
    ///
    /// When executing code in async mode, we sometimes want to
    /// implement a form of cooperative timeslicing: long-running Wasm
    /// guest code should periodically yield to the executor
    /// loop. This yielding could be implemented by using "fuel" (see
    /// [`consume_fuel`](Config::consume_fuel)). However, fuel
    /// instrumentation is somewhat expensive: it modifies the
    /// compiled form of the Wasm code so that it maintains a precise
    /// instruction count, frequently checking this count against the
    /// remaining fuel. If one does not need this precise count or
    /// deterministic interruptions, and only needs a periodic
    /// interrupt of some form, then it would be better to have a more
    /// lightweight mechanism.
    ///
    /// Epoch-based interruption is that mechanism. There is a global
    /// "epoch", which is a counter that divides time into arbitrary
    /// periods (or epochs). This counter lives on the
    /// [`Engine`](crate::Engine) and can be incremented by calling
    /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
    /// Epoch-based instrumentation works by setting a "deadline
    /// epoch". The compiled code knows the deadline, and at certain
    /// points, checks the current epoch against that deadline. It
    /// will yield if the deadline has been reached.
    ///
    /// The idea is that checking an infrequently-changing counter is
    /// cheaper than counting and frequently storing a precise metric
    /// (instructions executed) locally. The interruptions are not
    /// deterministic, but if the embedder increments the epoch in a
    /// periodic way (say, every regular timer tick by a thread or
    /// signal handler), then we can ensure that all async code will
    /// yield to the executor within a bounded time.
    ///
    /// The deadline check cannot be avoided by malicious wasm code. It is safe
    /// to use epoch deadlines to limit the execution time of untrusted
    /// code.
    ///
    /// The [`Store`](crate::Store) tracks the deadline, and controls
    /// what happens when the deadline is reached during
    /// execution. Several behaviors are possible:
    ///
    /// - Trap if code is executing when the epoch deadline is
    ///   met. See
    ///   [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
    ///
    /// - Call an arbitrary function. This function may choose to trap or
    ///   increment the epoch. See
    ///   [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
    ///
    /// - Yield to the executor loop, then resume when the future is
    ///   next polled. See
    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
    ///
    /// Trapping is the default. The yielding behavior may be used for
    /// the timeslicing behavior described above.
    ///
    /// This feature is available with or without async support.
    /// However, without async support, the timeslicing behavior is
    /// not available. This means epoch-based interruption can only
    /// serve as a simple external-interruption mechanism.
    ///
    /// An initial deadline must be set before executing code by calling
    /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
    /// deadline is not configured then wasm will immediately trap.
    ///
    /// ## Interaction with blocking host calls
    ///
    /// Epochs (and fuel) do not assist in handling WebAssembly code blocked in
    /// a call to the host. For example if the WebAssembly function calls
    /// `wasi:io/poll/poll` to sleep epochs will not assist in waking this up or
    /// timing it out. Epochs intentionally only affect running WebAssembly code
    /// itself and it's left to the embedder to determine how best to wake up
    /// indefinitely blocking code in the host.
    ///
    /// The typical solution for this, however, is to use
    /// [`Config::async_support(true)`](Config::async_support) and the `async`
    /// variant of WASI host functions. This models computation as a Rust
    /// `Future` which means that when blocking happens the future is only
    /// suspended and control yields back to the main event loop. This gives the
    /// embedder the opportunity to use `tokio::time::timeout` for example on a
    /// wasm computation and have the desired effect of cancelling a blocking
    /// operation when a timeout expires.
    ///
    /// ## When to use fuel vs. epochs
    ///
    /// In general, epoch-based interruption results in faster
    /// execution. This difference is sometimes significant: in some
    /// measurements, up to 2-3x. This is because epoch-based
    /// interruption does less work: it only watches for a global
    /// rarely-changing counter to increment, rather than keeping a
    /// local frequently-changing counter and comparing it to a
    /// deadline.
    ///
    /// Fuel, in contrast, should be used when *deterministic*
    /// yielding or trapping is needed. For example, if it is required
    /// that the same function call with the same starting state will
    /// always either complete or trap with an out-of-fuel error,
    /// deterministically, then fuel with a fixed bound should be
    /// used.
    ///
    /// # See Also
    ///
    /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
    /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
    /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
    /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
    /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
        self.tunables.epoch_interruption = Some(enable);
        self
    }
634
    /// Configures the maximum amount of stack space available for
    /// executing WebAssembly code.
    ///
    /// WebAssembly has well-defined semantics on stack overflow. This is
    /// intended to be a knob which can help configure how much stack space
    /// wasm execution is allowed to consume. Note that the number here is not
    /// super-precise, but rather wasm will take at most "pretty close to this
    /// much" stack space.
    ///
    /// If a wasm call (or series of nested wasm calls) takes more stack space
    /// than the `size` specified then a stack overflow trap will be raised.
    ///
    /// Caveat: this knob only limits the stack space consumed by wasm code.
    /// More importantly, it does not ensure that this much stack space is
    /// available on the calling thread stack. Exhausting the thread stack
    /// typically leads to an **abort** of the process.
    ///
    /// Here are some examples of how that could happen:
    ///
    /// - Let's assume this option is set to 2 MiB and then a thread that has
    ///   a stack with 512 KiB left.
    ///
    ///   If wasm code consumes more than 512 KiB then the process will be aborted.
    ///
    /// - Assuming the same conditions, but this time wasm code does not consume
    ///   any stack but calls into a host function. The host function consumes
    ///   more than 512 KiB of stack space. The process will be aborted.
    ///
    /// There's another gotcha related to recursive calling into wasm: the stack
    /// space consumed by a host function is counted towards this limit. The
    /// host functions are not prevented from consuming more than this limit.
    /// However, if the host function used more than this limit and called
    /// back into wasm, then the execution will trap immediately because of
    /// stack overflow.
    ///
    /// When the `async` feature is enabled, this value cannot exceed the
    /// `async_stack_size` option. Be careful not to set this value too close
    /// to `async_stack_size` as doing so may limit how much stack space
    /// is available for host functions.
    ///
    /// By default this option is 512 KiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the `size` specified here is
    /// either 0 or larger than the [`Config::async_stack_size`] configuration.
    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
        // Validation (non-zero, <= async_stack_size) is deferred to engine build.
        self.max_wasm_stack = size;
        self
    }
685
    /// Configures the size of the stacks used for asynchronous execution.
    ///
    /// This setting configures the size of the stacks that are allocated for
    /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
    ///
    /// The amount of stack space guaranteed for host functions is
    /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
    /// close to one another; doing so may cause host functions to overflow the
    /// stack and abort the process.
    ///
    /// By default this option is 2 MiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the value for this option is
    /// smaller than the [`Config::max_wasm_stack`] option.
    #[cfg(feature = "async")]
    pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
        // Validated against `max_wasm_stack` when the engine is built.
        self.async_stack_size = size;
        self
    }
707
708    fn wasm_feature(&mut self, flag: WasmFeatures, enable: bool) -> &mut Self {
709        self.enabled_features.set(flag, enable);
710        self.disabled_features.set(flag, !enable);
711        self
712    }
713
714    /// Configures whether the WebAssembly tail calls proposal will be enabled
715    /// for compilation or not.
716    ///
717    /// The [WebAssembly tail calls proposal] introduces the `return_call` and
718    /// `return_call_indirect` instructions. These instructions allow for Wasm
719    /// programs to implement some recursive algorithms with *O(1)* stack space
720    /// usage.
721    ///
722    /// This is `true` by default except when the Winch compiler is enabled.
723    ///
724    /// [WebAssembly tail calls proposal]: https://github.com/WebAssembly/tail-call
725    pub fn wasm_tail_call(&mut self, enable: bool) -> &mut Self {
726        self.wasm_feature(WasmFeatures::TAIL_CALL, enable);
727        self
728    }
729
730    /// Configures whether the WebAssembly custom-page-sizes proposal will be
731    /// enabled for compilation or not.
732    ///
733    /// The [WebAssembly custom-page-sizes proposal] allows a memory to
734    /// customize its page sizes. By default, Wasm page sizes are 64KiB
735    /// large. This proposal allows the memory to opt into smaller page sizes
736    /// instead, allowing Wasm to run in environments with less than 64KiB RAM
737    /// available, for example.
738    ///
739    /// Note that the page size is part of the memory's type, and because
740    /// different memories may have different types, they may also have
741    /// different page sizes.
742    ///
743    /// Currently the only valid page sizes are 64KiB (the default) and 1
744    /// byte. Future extensions may relax this constraint and allow all powers
745    /// of two.
746    ///
747    /// Support for this proposal is disabled by default.
748    ///
749    /// [WebAssembly custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes
750    pub fn wasm_custom_page_sizes(&mut self, enable: bool) -> &mut Self {
751        self.wasm_feature(WasmFeatures::CUSTOM_PAGE_SIZES, enable);
752        self
753    }
754
755    /// Configures whether the WebAssembly [threads] proposal will be enabled
756    /// for compilation.
757    ///
758    /// This feature gates items such as shared memories and atomic
759    /// instructions. Note that the threads feature depends on the bulk memory
760    /// feature, which is enabled by default. Additionally note that while the
761    /// wasm feature is called "threads" it does not actually include the
762    /// ability to spawn threads. Spawning threads is part of the [wasi-threads]
763    /// proposal which is a separately gated feature in Wasmtime.
764    ///
765    /// Embeddings of Wasmtime are able to build their own custom threading
766    /// scheme on top of the core wasm threads proposal, however.
767    ///
768    /// This is `true` by default.
769    ///
770    /// [threads]: https://github.com/webassembly/threads
771    /// [wasi-threads]: https://github.com/webassembly/wasi-threads
772    #[cfg(feature = "threads")]
773    pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
774        self.wasm_feature(WasmFeatures::THREADS, enable);
775        self
776    }
777
778    /// Configures whether the [WebAssembly reference types proposal][proposal]
779    /// will be enabled for compilation.
780    ///
781    /// This feature gates items such as the `externref` and `funcref` types as
782    /// well as allowing a module to define multiple tables.
783    ///
784    /// Note that the reference types proposal depends on the bulk memory proposal.
785    ///
786    /// This feature is `true` by default.
787    ///
788    /// # Errors
789    ///
790    /// The validation of this feature are deferred until the engine is being built,
791    /// and thus may cause `Engine::new` fail if the `bulk_memory` feature is disabled.
792    ///
793    /// [proposal]: https://github.com/webassembly/reference-types
794    #[cfg(feature = "gc")]
795    pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
796        self.wasm_feature(WasmFeatures::REFERENCE_TYPES, enable);
797        self
798    }
799
800    /// Configures whether the [WebAssembly function references
801    /// proposal][proposal] will be enabled for compilation.
802    ///
803    /// This feature gates non-nullable reference types, function reference
804    /// types, `call_ref`, `ref.func`, and non-nullable reference related
805    /// instructions.
806    ///
807    /// Note that the function references proposal depends on the reference
808    /// types proposal.
809    ///
810    /// This feature is `false` by default.
811    ///
812    /// [proposal]: https://github.com/WebAssembly/function-references
813    #[cfg(feature = "gc")]
814    pub fn wasm_function_references(&mut self, enable: bool) -> &mut Self {
815        self.wasm_feature(WasmFeatures::FUNCTION_REFERENCES, enable);
816        self
817    }
818
819    /// Configures whether the [WebAssembly Garbage Collection
820    /// proposal][proposal] will be enabled for compilation.
821    ///
822    /// This feature gates `struct` and `array` type definitions and references,
823    /// the `i31ref` type, and all related instructions.
824    ///
825    /// Note that the function references proposal depends on the typed function
826    /// references proposal.
827    ///
828    /// This feature is `false` by default.
829    ///
830    /// **Warning: Wasmtime's implementation of the GC proposal is still in
831    /// progress and generally not ready for primetime.**
832    ///
833    /// [proposal]: https://github.com/WebAssembly/gc
834    #[cfg(feature = "gc")]
835    pub fn wasm_gc(&mut self, enable: bool) -> &mut Self {
836        self.wasm_feature(WasmFeatures::GC, enable);
837        self
838    }
839
840    /// Configures whether the WebAssembly SIMD proposal will be
841    /// enabled for compilation.
842    ///
843    /// The [WebAssembly SIMD proposal][proposal]. This feature gates items such
844    /// as the `v128` type and all of its operators being in a module. Note that
845    /// this does not enable the [relaxed simd proposal].
846    ///
847    /// On x86_64 platforms note that enabling this feature requires SSE 4.2 and
848    /// below to be available on the target platform. Compilation will fail if
849    /// the compile target does not include SSE 4.2.
850    ///
851    /// This is `true` by default.
852    ///
853    /// [proposal]: https://github.com/webassembly/simd
854    /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
855    pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
856        self.wasm_feature(WasmFeatures::SIMD, enable);
857        self
858    }
859
860    /// Configures whether the WebAssembly Relaxed SIMD proposal will be
861    /// enabled for compilation.
862    ///
863    /// The relaxed SIMD proposal adds new instructions to WebAssembly which,
864    /// for some specific inputs, are allowed to produce different results on
865    /// different hosts. More-or-less this proposal enables exposing
866    /// platform-specific semantics of SIMD instructions in a controlled
867    /// fashion to a WebAssembly program. From an embedder's perspective this
868    /// means that WebAssembly programs may execute differently depending on
869    /// whether the host is x86_64 or AArch64, for example.
870    ///
871    /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
872    /// lowering for the platform it's running on. This means that, by default,
873    /// some relaxed SIMD instructions may have different results for the same
874    /// inputs across x86_64 and AArch64. This behavior can be disabled through
875    /// the [`Config::relaxed_simd_deterministic`] option which will force
876    /// deterministic behavior across all platforms, as classified by the
877    /// specification, at the cost of performance.
878    ///
879    /// This is `true` by default.
880    ///
881    /// [proposal]: https://github.com/webassembly/relaxed-simd
882    pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
883        self.wasm_feature(WasmFeatures::RELAXED_SIMD, enable);
884        self
885    }
886
    /// This option can be used to control the behavior of the [relaxed SIMD
    /// proposal's][proposal] instructions.
    ///
    /// The relaxed SIMD proposal introduces instructions that are allowed to
    /// have different behavior on different architectures, primarily to afford
    /// an efficient implementation on all architectures. This means, however,
    /// that the same module may execute differently on one host than another,
    /// which typically is not otherwise the case. This option is provided to
    /// force Wasmtime to generate deterministic code for all relaxed simd
    /// instructions, at the cost of performance, for all architectures. When
    /// this option is enabled then the deterministic behavior of all
    /// instructions in the relaxed SIMD proposal is selected.
    ///
    /// This is `false` by default.
    ///
    /// [proposal]: https://github.com/webassembly/relaxed-simd
    pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
        // Stored as `Some(..)` so an explicit setting can be distinguished
        // from the unset default when tunables are finalized.
        self.tunables.relaxed_simd_deterministic = Some(enable);
        self
    }
907
908    /// Configures whether the [WebAssembly bulk memory operations
909    /// proposal][proposal] will be enabled for compilation.
910    ///
911    /// This feature gates items such as the `memory.copy` instruction, passive
912    /// data/table segments, etc, being in a module.
913    ///
914    /// This is `true` by default.
915    ///
916    /// Feature `reference_types`, which is also `true` by default, requires
917    /// this feature to be enabled. Thus disabling this feature must also disable
918    /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
919    ///
920    /// # Errors
921    ///
922    /// Disabling this feature without disabling `reference_types` will cause
923    /// `Engine::new` to fail.
924    ///
925    /// [proposal]: https://github.com/webassembly/bulk-memory-operations
926    pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
927        self.wasm_feature(WasmFeatures::BULK_MEMORY, enable);
928        self
929    }
930
931    /// Configures whether the WebAssembly multi-value [proposal] will
932    /// be enabled for compilation.
933    ///
934    /// This feature gates functions and blocks returning multiple values in a
935    /// module, for example.
936    ///
937    /// This is `true` by default.
938    ///
939    /// [proposal]: https://github.com/webassembly/multi-value
940    pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
941        self.wasm_feature(WasmFeatures::MULTI_VALUE, enable);
942        self
943    }
944
945    /// Configures whether the WebAssembly multi-memory [proposal] will
946    /// be enabled for compilation.
947    ///
948    /// This feature gates modules having more than one linear memory
949    /// declaration or import.
950    ///
951    /// This is `true` by default.
952    ///
953    /// [proposal]: https://github.com/webassembly/multi-memory
954    pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
955        self.wasm_feature(WasmFeatures::MULTI_MEMORY, enable);
956        self
957    }
958
959    /// Configures whether the WebAssembly memory64 [proposal] will
960    /// be enabled for compilation.
961    ///
962    /// Note that this the upstream specification is not finalized and Wasmtime
963    /// may also have bugs for this feature since it hasn't been exercised
964    /// much.
965    ///
966    /// This is `false` by default.
967    ///
968    /// [proposal]: https://github.com/webassembly/memory64
969    pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
970        self.wasm_feature(WasmFeatures::MEMORY64, enable);
971        self
972    }
973
974    /// Configures whether the WebAssembly extended-const [proposal] will
975    /// be enabled for compilation.
976    ///
977    /// This is `true` by default.
978    ///
979    /// [proposal]: https://github.com/webassembly/extended-const
980    pub fn wasm_extended_const(&mut self, enable: bool) -> &mut Self {
981        self.wasm_feature(WasmFeatures::EXTENDED_CONST, enable);
982        self
983    }
984
985    /// Configures whether the WebAssembly component-model [proposal] will
986    /// be enabled for compilation.
987    ///
988    /// Note that this feature is a work-in-progress and is incomplete.
989    ///
990    /// This is `false` by default.
991    ///
992    /// [proposal]: https://github.com/webassembly/component-model
993    #[cfg(feature = "component-model")]
994    pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
995        self.wasm_feature(WasmFeatures::COMPONENT_MODEL, enable);
996        self
997    }
998
999    /// Configures whether components support more than 32 flags in each `flags`
1000    /// type.
1001    ///
1002    /// This is part of the transition plan in
1003    /// https://github.com/WebAssembly/component-model/issues/370.
1004    #[cfg(feature = "component-model")]
1005    pub fn wasm_component_model_more_flags(&mut self, enable: bool) -> &mut Self {
1006        self.wasm_feature(WasmFeatures::COMPONENT_MODEL_MORE_FLAGS, enable);
1007        self
1008    }
1009
1010    /// Configures whether components support more than one return value for functions.
1011    ///
1012    /// This is part of the transition plan in
1013    /// https://github.com/WebAssembly/component-model/pull/368.
1014    #[cfg(feature = "component-model")]
1015    pub fn wasm_component_model_multiple_returns(&mut self, enable: bool) -> &mut Self {
1016        self.wasm_feature(WasmFeatures::COMPONENT_MODEL_MULTIPLE_RETURNS, enable);
1017        self
1018    }
1019
    /// Configures which compilation strategy will be used for wasm modules.
    ///
    /// This method can be used to configure which compiler is used for wasm
    /// modules, and for more documentation consult the [`Strategy`] enumeration
    /// and its documentation.
    ///
    /// The default value for this is `Strategy::Auto`.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
        // NOTE(review): `not_auto()` presumably normalizes `Strategy::Auto`
        // into `None` so the actual backend is selected later — defined
        // elsewhere in this file; confirm before relying on it.
        self.compiler_config.strategy = strategy.not_auto();
        self
    }
1032
    /// Creates a default profiler based on the profiling strategy chosen.
    ///
    /// Profiler creation calls the type's default initializer where the purpose is
    /// really just to put in place the type used for profiling.
    ///
    /// Some [`ProfilingStrategy`] values require specific platforms or particular
    /// features to be enabled, such as `ProfilingStrategy::JitDump` requiring the
    /// `jitdump` feature.
    ///
    /// # Errors
    ///
    /// The validation of this field is deferred until the engine is being built, and thus may
    /// cause `Engine::new` fail if the required feature is disabled, or the platform is not
    /// supported.
    pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
        self.profiling_strategy = profile;
        self
    }
1051
1052    /// Configures whether the debug verifier of Cranelift is enabled or not.
1053    ///
1054    /// When Cranelift is used as a code generation backend this will configure
1055    /// it to have the `enable_verifier` flag which will enable a number of debug
1056    /// checks inside of Cranelift. This is largely only useful for the
1057    /// developers of wasmtime itself.
1058    ///
1059    /// The default value for this is `false`
1060    #[cfg(any(feature = "cranelift", feature = "winch"))]
1061    pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
1062        let val = if enable { "true" } else { "false" };
1063        self.compiler_config
1064            .settings
1065            .insert("enable_verifier".to_string(), val.to_string());
1066        self
1067    }
1068
1069    /// Configures the Cranelift code generator optimization level.
1070    ///
1071    /// When the Cranelift code generator is used you can configure the
1072    /// optimization level used for generated code in a few various ways. For
1073    /// more information see the documentation of [`OptLevel`].
1074    ///
1075    /// The default value for this is `OptLevel::None`.
1076    #[cfg(any(feature = "cranelift", feature = "winch"))]
1077    pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
1078        let val = match level {
1079            OptLevel::None => "none",
1080            OptLevel::Speed => "speed",
1081            OptLevel::SpeedAndSize => "speed_and_size",
1082        };
1083        self.compiler_config
1084            .settings
1085            .insert("opt_level".to_string(), val.to_string());
1086        self
1087    }
1088
1089    /// Configures whether Cranelift should perform a NaN-canonicalization pass.
1090    ///
1091    /// When Cranelift is used as a code generation backend this will configure
1092    /// it to replace NaNs with a single canonical value. This is useful for
1093    /// users requiring entirely deterministic WebAssembly computation.  This is
1094    /// not required by the WebAssembly spec, so it is not enabled by default.
1095    ///
1096    /// Note that this option affects not only WebAssembly's `f32` and `f64`
1097    /// types but additionally the `v128` type. This option will cause
1098    /// operations using any of these types to have extra checks placed after
1099    /// them to normalize NaN values as needed.
1100    ///
1101    /// The default value for this is `false`
1102    #[cfg(any(feature = "cranelift", feature = "winch"))]
1103    pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
1104        let val = if enable { "true" } else { "false" };
1105        self.compiler_config
1106            .settings
1107            .insert("enable_nan_canonicalization".to_string(), val.to_string());
1108        self
1109    }
1110
1111    /// Controls whether proof-carrying code (PCC) is used to validate
1112    /// lowering of Wasm sandbox checks.
1113    ///
1114    /// Proof-carrying code carries "facts" about program values from
1115    /// the IR all the way to machine code, and checks those facts
1116    /// against known machine-instruction semantics. This guards
1117    /// against bugs in instruction lowering that might create holes
1118    /// in the Wasm sandbox.
1119    ///
1120    /// PCC is designed to be fast: it does not require complex
1121    /// solvers or logic engines to verify, but only a linear pass
1122    /// over a trail of "breadcrumbs" or facts at each intermediate
1123    /// value. Thus, it is appropriate to enable in production.
1124    #[cfg(any(feature = "cranelift", feature = "winch"))]
1125    pub fn cranelift_pcc(&mut self, enable: bool) -> &mut Self {
1126        let val = if enable { "true" } else { "false" };
1127        self.compiler_config
1128            .settings
1129            .insert("enable_pcc".to_string(), val.to_string());
1130        self
1131    }
1132
1133    /// Allows setting a Cranelift boolean flag or preset. This allows
1134    /// fine-tuning of Cranelift settings.
1135    ///
1136    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1137    /// either; other `Config` functions should be preferred for stability.
1138    ///
1139    /// # Safety
1140    ///
1141    /// This is marked as unsafe, because setting the wrong flag might break invariants,
1142    /// resulting in execution hazards.
1143    ///
1144    /// # Errors
1145    ///
1146    /// The validation of the flags are deferred until the engine is being built, and thus may
1147    /// cause `Engine::new` fail if the flag's name does not exist, or the value is not appropriate
1148    /// for the flag type.
1149    #[cfg(any(feature = "cranelift", feature = "winch"))]
1150    pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
1151        self.compiler_config.flags.insert(flag.to_string());
1152        self
1153    }
1154
1155    /// Allows settings another Cranelift flag defined by a flag name and value. This allows
1156    /// fine-tuning of Cranelift settings.
1157    ///
1158    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1159    /// either; other `Config` functions should be preferred for stability.
1160    ///
1161    /// # Safety
1162    ///
1163    /// This is marked as unsafe, because setting the wrong flag might break invariants,
1164    /// resulting in execution hazards.
1165    ///
1166    /// # Errors
1167    ///
1168    /// The validation of the flags are deferred until the engine is being built, and thus may
1169    /// cause `Engine::new` fail if the flag's name does not exist, or incompatible with other
1170    /// settings.
1171    ///
1172    /// For example, feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
1173    /// manually set to false then it will fail.
1174    #[cfg(any(feature = "cranelift", feature = "winch"))]
1175    pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
1176        self.compiler_config
1177            .settings
1178            .insert(name.to_string(), value.to_string());
1179        self
1180    }
1181
1182    /// Loads cache configuration specified at `path`.
1183    ///
1184    /// This method will read the file specified by `path` on the filesystem and
1185    /// attempt to load cache configuration from it. This method can also fail
1186    /// due to I/O errors, misconfiguration, syntax errors, etc. For expected
1187    /// syntax in the configuration file see the [documentation online][docs].
1188    ///
1189    /// By default cache configuration is not enabled or loaded.
1190    ///
1191    /// This method is only available when the `cache` feature of this crate is
1192    /// enabled.
1193    ///
1194    /// # Errors
1195    ///
1196    /// This method can fail due to any error that happens when loading the file
1197    /// pointed to by `path` and attempting to load the cache configuration.
1198    ///
1199    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
1200    #[cfg(feature = "cache")]
1201    pub fn cache_config_load(&mut self, path: impl AsRef<Path>) -> Result<&mut Self> {
1202        self.cache_config = CacheConfig::from_file(Some(path.as_ref()))?;
1203        Ok(self)
1204    }
1205
    /// Disable caching.
    ///
    /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will
    /// recompile `my_wasm`, even when it is unchanged.
    ///
    /// By default, new configs do not have caching enabled. This method is only
    /// useful for disabling a previous cache configuration.
    ///
    /// This method is only available when the `cache` feature of this crate is
    /// enabled.
    #[cfg(feature = "cache")]
    pub fn disable_cache(&mut self) -> &mut Self {
        // Overwrites any previously-loaded cache configuration.
        self.cache_config = CacheConfig::new_cache_disabled();
        self
    }
1221
    /// Loads cache configuration from the system default path.
    ///
    /// This method is the same as [`Config::cache_config_load`] except that it
    /// does not take a path argument and instead loads the default
    /// configuration present on the system. This is located, for example, on
    /// Unix at `$HOME/.config/wasmtime/config.toml` and is typically created
    /// with the `wasmtime config new` command.
    ///
    /// By default cache configuration is not enabled or loaded.
    ///
    /// This method is only available when the `cache` feature of this crate is
    /// enabled.
    ///
    /// # Errors
    ///
    /// This method can fail due to any error that happens when loading the
    /// default system configuration. Note that it is not an error if the
    /// default config file does not exist, in which case the default settings
    /// for an enabled cache are applied.
    ///
    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
    #[cfg(feature = "cache")]
    pub fn cache_config_load_default(&mut self) -> Result<&mut Self> {
        // `None` selects the system default config file location.
        self.cache_config = CacheConfig::from_file(None)?;
        Ok(self)
    }
1248
    /// Sets a custom memory creator.
    ///
    /// Custom memory creators are used when creating host `Memory` objects or when
    /// creating instance linear memories for the on-demand instance allocation strategy.
    #[cfg(feature = "runtime")]
    pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
        // NOTE(review): `MemoryCreatorProxy` presumably adapts the public
        // `MemoryCreator` trait to the runtime's internal creator interface —
        // see `crate::trampoline`; confirm before relying on it.
        self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
        self
    }
1258
    /// Sets a custom stack creator.
    ///
    /// Custom stack creators are used when creating async instance stacks for
    /// the on-demand instance allocation strategy.
    #[cfg(feature = "async")]
    pub fn with_host_stack(&mut self, stack_creator: Arc<dyn StackCreator>) -> &mut Self {
        self.stack_creator = Some(Arc::new(StackCreatorProxy(stack_creator)));
        self
    }
1268
    /// Sets the instance allocation strategy to use.
    ///
    /// When using the pooling instance allocation strategy, all linear memories
    /// will be created as "static" and the
    /// [`Config::static_memory_maximum_size`] and
    /// [`Config::static_memory_guard_size`] options will be used to configure
    /// the virtual memory allocations of linear memories.
    pub fn allocation_strategy(&mut self, strategy: InstanceAllocationStrategy) -> &mut Self {
        self.allocation_strategy = strategy;
        self
    }
1280
1281    /// Configures the maximum size, in bytes, where a linear memory is
1282    /// considered static, above which it'll be considered dynamic.
1283    ///
1284    /// > Note: this value has important performance ramifications, be sure to
1285    /// > understand what this value does before tweaking it and benchmarking.
1286    ///
1287    /// This function configures the threshold for wasm memories whether they're
1288    /// implemented as a dynamically relocatable chunk of memory or a statically
1289    /// located chunk of memory. The `max_size` parameter here is the size, in
1290    /// bytes, where if the maximum size of a linear memory is below `max_size`
1291    /// then it will be statically allocated with enough space to never have to
1292    /// move. If the maximum size of a linear memory is larger than `max_size`
1293    /// then wasm memory will be dynamically located and may move in memory
1294    /// through growth operations.
1295    ///
1296    /// Specifying a `max_size` of 0 means that all memories will be dynamic and
1297    /// may be relocated through `memory.grow`. Also note that if any wasm
1298    /// memory's maximum size is below `max_size` then it will still reserve
1299    /// `max_size` bytes in the virtual memory space.
1300    ///
1301    /// ## Static vs Dynamic Memory
1302    ///
1303    /// Linear memories represent contiguous arrays of bytes, but they can also
1304    /// be grown through the API and wasm instructions. When memory is grown if
1305    /// space hasn't been preallocated then growth may involve relocating the
1306    /// base pointer in memory. Memories in Wasmtime are classified in two
1307    /// different ways:
1308    ///
1309    /// * **static** - these memories preallocate all space necessary they'll
1310    ///   ever need, meaning that the base pointer of these memories is never
1311    ///   moved. Static memories may take more virtual memory space because of
1312    ///   pre-reserving space for memories.
1313    ///
1314    /// * **dynamic** - these memories are not preallocated and may move during
1315    ///   growth operations. Dynamic memories consume less virtual memory space
1316    ///   because they don't need to preallocate space for future growth.
1317    ///
1318    /// Static memories can be optimized better in JIT code because once the
1319    /// base address is loaded in a function it's known that we never need to
1320    /// reload it because it never changes, `memory.grow` is generally a pretty
1321    /// fast operation because the wasm memory is never relocated, and under
1322    /// some conditions bounds checks can be elided on memory accesses.
1323    ///
1324    /// Dynamic memories can't be quite as heavily optimized because the base
1325    /// address may need to be reloaded more often, they may require relocating
1326    /// lots of data on `memory.grow`, and dynamic memories require
1327    /// unconditional bounds checks on all memory accesses.
1328    ///
1329    /// ## Should you use static or dynamic memory?
1330    ///
1331    /// In general you probably don't need to change the value of this property.
1332    /// The defaults here are optimized for each target platform to consume a
1333    /// reasonable amount of physical memory while also generating speedy
1334    /// machine code.
1335    ///
1336    /// One of the main reasons you may want to configure this today is if your
1337    /// environment can't reserve virtual memory space for each wasm linear
1338    /// memory. On 64-bit platforms wasm memories require a 6GB reservation by
1339    /// default, and system limits may prevent this in some scenarios. In this
1340    /// case you may wish to force memories to be allocated dynamically meaning
1341    /// that the virtual memory footprint of creating a wasm memory should be
1342    /// exactly what's used by the wasm itself.
1343    ///
1344    /// For 32-bit memories a static memory must contain at least 4GB of
1345    /// reserved address space plus a guard page to elide any bounds checks at
1346    /// all. Smaller static memories will use similar bounds checks as dynamic
1347    /// memories.
1348    ///
1349    /// ## Default
1350    ///
1351    /// The default value for this property depends on the host platform. For
1352    /// 64-bit platforms there's lots of address space available, so the default
1353    /// configured here is 4GB. WebAssembly linear memories currently max out at
1354    /// 4GB which means that on 64-bit platforms Wasmtime by default always uses
1355    /// a static memory. This, coupled with a sufficiently sized guard region,
1356    /// should produce the fastest JIT code on 64-bit platforms, but does
1357    /// require a large address space reservation for each wasm memory.
1358    ///
1359    /// For 32-bit platforms this value defaults to 1GB. This means that wasm
1360    /// memories whose maximum size is less than 1GB will be allocated
1361    /// statically, otherwise they'll be considered dynamic.
1362    ///
1363    /// ## Static Memory and Pooled Instance Allocation
1364    ///
1365    /// When using the pooling instance allocator memories are considered to
1366    /// always be static memories, they are never dynamic. This setting
1367    /// configures the size of linear memory to reserve for each memory in the
1368    /// pooling allocator.
1369    ///
1370    /// Note that the pooling allocator can reduce the amount of memory needed
1371    /// for pooling allocation by using memory protection; see
    /// `PoolingAllocationConfig::memory_protection_keys` for details.
1373    pub fn static_memory_maximum_size(&mut self, max_size: u64) -> &mut Self {
1374        self.tunables.static_memory_reservation = Some(max_size);
1375        self
1376    }
1377
1378    /// Indicates that the "static" style of memory should always be used.
1379    ///
1380    /// This configuration option enables selecting the "static" option for all
1381    /// linear memories created within this `Config`. This means that all
1382    /// memories will be allocated up-front and will never move. Additionally
1383    /// this means that all memories are synthetically limited by the
1384    /// [`Config::static_memory_maximum_size`] option, regardless of what the
1385    /// actual maximum size is on the memory's original type.
1386    ///
1387    /// For the difference between static and dynamic memories, see the
1388    /// [`Config::static_memory_maximum_size`].
1389    pub fn static_memory_forced(&mut self, force: bool) -> &mut Self {
1390        self.tunables.static_memory_bound_is_maximum = Some(force);
1391        self
1392    }
1393
1394    /// Configures the size, in bytes, of the guard region used at the end of a
1395    /// static memory's address space reservation.
1396    ///
1397    /// > Note: this value has important performance ramifications, be sure to
1398    /// > understand what this value does before tweaking it and benchmarking.
1399    ///
1400    /// All WebAssembly loads/stores are bounds-checked and generate a trap if
1401    /// they're out-of-bounds. Loads and stores are often very performance
1402    /// critical, so we want the bounds check to be as fast as possible!
1403    /// Accelerating these memory accesses is the motivation for a guard after a
1404    /// memory allocation.
1405    ///
1406    /// Memories (both static and dynamic) can be configured with a guard at the
1407    /// end of them which consists of unmapped virtual memory. This unmapped
1408    /// memory will trigger a memory access violation (e.g. segfault) if
1409    /// accessed. This allows JIT code to elide bounds checks if it can prove
1410    /// that an access, if out of bounds, would hit the guard region. This means
1411    /// that having such a guard of unmapped memory can remove the need for
1412    /// bounds checks in JIT code.
1413    ///
1414    /// For the difference between static and dynamic memories, see the
1415    /// [`Config::static_memory_maximum_size`].
1416    ///
1417    /// ## How big should the guard be?
1418    ///
1419    /// In general, like with configuring `static_memory_maximum_size`, you
1420    /// probably don't want to change this value from the defaults. Otherwise,
1421    /// though, the size of the guard region affects the number of bounds checks
1422    /// needed for generated wasm code. More specifically, loads/stores with
1423    /// immediate offsets will generate bounds checks based on how big the guard
1424    /// page is.
1425    ///
1426    /// For 32-bit wasm memories a 4GB static memory is required to even start
1427    /// removing bounds checks. A 4GB guard size will guarantee that the module
1428    /// has zero bounds checks for memory accesses. A 2GB guard size will
1429    /// eliminate all bounds checks with an immediate offset less than 2GB. A
1430    /// guard size of zero means that all memory accesses will still have bounds
1431    /// checks.
1432    ///
1433    /// ## Default
1434    ///
1435    /// The default value for this property is 2GB on 64-bit platforms. This
1436    /// allows eliminating almost all bounds checks on loads/stores with an
1437    /// immediate offset of less than 2GB. On 32-bit platforms this defaults to
1438    /// 64KB.
1439    ///
1440    /// ## Errors
1441    ///
1442    /// The `Engine::new` method will return an error if this option is smaller
1443    /// than the value configured for [`Config::dynamic_memory_guard_size`].
1444    pub fn static_memory_guard_size(&mut self, guard_size: u64) -> &mut Self {
1445        self.tunables.static_memory_offset_guard_size = Some(guard_size);
1446        self
1447    }
1448
1449    /// Configures the size, in bytes, of the guard region used at the end of a
1450    /// dynamic memory's address space reservation.
1451    ///
1452    /// For the difference between static and dynamic memories, see the
1453    /// [`Config::static_memory_maximum_size`]
1454    ///
1455    /// For more information about what a guard is, see the documentation on
1456    /// [`Config::static_memory_guard_size`].
1457    ///
1458    /// Note that the size of the guard region for dynamic memories is not super
1459    /// critical for performance. Making it reasonably-sized can improve
1460    /// generated code slightly, but for maximum performance you'll want to lean
1461    /// towards static memories rather than dynamic anyway.
1462    ///
1463    /// Also note that the dynamic memory guard size must be smaller than the
1464    /// static memory guard size, so if a large dynamic memory guard is
1465    /// specified then the static memory guard size will also be automatically
1466    /// increased.
1467    ///
1468    /// ## Default
1469    ///
1470    /// This value defaults to 64KB.
1471    ///
1472    /// ## Errors
1473    ///
1474    /// The `Engine::new` method will return an error if this option is larger
1475    /// than the value configured for [`Config::static_memory_guard_size`].
1476    pub fn dynamic_memory_guard_size(&mut self, guard_size: u64) -> &mut Self {
1477        self.tunables.dynamic_memory_offset_guard_size = Some(guard_size);
1478        self
1479    }
1480
1481    /// Configures the size, in bytes, of the extra virtual memory space
1482    /// reserved after a "dynamic" memory for growing into.
1483    ///
1484    /// For the difference between static and dynamic memories, see the
1485    /// [`Config::static_memory_maximum_size`]
1486    ///
1487    /// Dynamic memories can be relocated in the process's virtual address space
1488    /// on growth and do not always reserve their entire space up-front. This
1489    /// means that a growth of the memory may require movement in the address
1490    /// space, which in the worst case can copy a large number of bytes from one
1491    /// region to another.
1492    ///
1493    /// This setting configures how many bytes are reserved after the initial
1494    /// reservation for a dynamic memory for growing into. A value of 0 here
1495    /// means that no extra bytes are reserved and all calls to `memory.grow`
1496    /// will need to relocate the wasm linear memory (copying all the bytes). A
1497    /// value of 1 megabyte, however, means that `memory.grow` can allocate up
1498    /// to a megabyte of extra memory before the memory needs to be moved in
1499    /// linear memory.
1500    ///
    /// Note that this is currently a simple heuristic for optimizing the growth
1502    /// of dynamic memories, primarily implemented for the memory64 proposal
1503    /// where all memories are currently "dynamic". This is unlikely to be a
1504    /// one-size-fits-all style approach and if you're an embedder running into
1505    /// issues with dynamic memories and growth and are interested in having
1506    /// other growth strategies available here please feel free to [open an
1507    /// issue on the Wasmtime repository][issue]!
1508    ///
    /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
1510    ///
1511    /// ## Default
1512    ///
1513    /// For 64-bit platforms this defaults to 2GB, and for 32-bit platforms this
1514    /// defaults to 1MB.
1515    pub fn dynamic_memory_reserved_for_growth(&mut self, reserved: u64) -> &mut Self {
1516        self.tunables.dynamic_memory_growth_reserve = Some(reserved);
1517        self
1518    }
1519
1520    /// Indicates whether a guard region is present before allocations of
1521    /// linear memory.
1522    ///
1523    /// Guard regions before linear memories are never used during normal
1524    /// operation of WebAssembly modules, even if they have out-of-bounds
1525    /// loads. The only purpose for a preceding guard region in linear memory
1526    /// is extra protection against possible bugs in code generators like
1527    /// Cranelift. This setting does not affect performance in any way, but will
1528    /// result in larger virtual memory reservations for linear memories (it
1529    /// won't actually ever use more memory, just use more of the address
1530    /// space).
1531    ///
1532    /// The size of the guard region before linear memory is the same as the
1533    /// guard size that comes after linear memory, which is configured by
1534    /// [`Config::static_memory_guard_size`] and
1535    /// [`Config::dynamic_memory_guard_size`].
1536    ///
1537    /// ## Default
1538    ///
1539    /// This value defaults to `true`.
1540    pub fn guard_before_linear_memory(&mut self, guard: bool) -> &mut Self {
1541        self.tunables.guard_before_linear_memory = Some(guard);
1542        self
1543    }
1544
1545    /// Indicates whether to initialize tables lazily, so that instantiation
1546    /// is fast but indirect calls are a little slower. If false, tables
1547    /// are initialized eagerly during instantiation from any active element
1548    /// segments that apply to them.
1549    ///
1550    /// ## Default
1551    ///
1552    /// This value defaults to `true`.
1553    pub fn table_lazy_init(&mut self, table_lazy_init: bool) -> &mut Self {
1554        self.tunables.table_lazy_init = Some(table_lazy_init);
1555        self
1556    }
1557
1558    /// Configure the version information used in serialized and deserialized [`crate::Module`]s.
    /// This affects the behavior of [`crate::Module::serialize()`], as well as
1560    /// [`crate::Module::deserialize()`] and related functions.
1561    ///
1562    /// The default strategy is to use the wasmtime crate's Cargo package version.
1563    pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
1564        match strategy {
1565            // This case requires special precondition for assertion in SerializedModule::to_bytes
1566            ModuleVersionStrategy::Custom(ref v) => {
1567                if v.as_bytes().len() > 255 {
1568                    bail!("custom module version cannot be more than 255 bytes: {}", v);
1569                }
1570            }
1571            _ => {}
1572        }
1573        self.module_version = strategy;
1574        Ok(self)
1575    }
1576
1577    /// Configure whether wasmtime should compile a module using multiple
1578    /// threads.
1579    ///
1580    /// Disabling this will result in a single thread being used to compile
1581    /// the wasm bytecode.
1582    ///
1583    /// By default parallel compilation is enabled.
1584    #[cfg(feature = "parallel-compilation")]
1585    pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
1586        self.parallel_compilation = parallel;
1587        self
1588    }
1589
1590    /// Configures whether compiled artifacts will contain information to map
1591    /// native program addresses back to the original wasm module.
1592    ///
1593    /// This configuration option is `true` by default and, if enabled,
1594    /// generates the appropriate tables in compiled modules to map from native
1595    /// address back to wasm source addresses. This is used for displaying wasm
1596    /// program counters in backtraces as well as generating filenames/line
1597    /// numbers if so configured as well (and the original wasm module has DWARF
1598    /// debugging information present).
1599    pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
1600        self.tunables.generate_address_map = Some(generate);
1601        self
1602    }
1603
    /// Configures whether copy-on-write memory-mapped data is used to
    /// initialize a linear memory.
    ///
    /// Initializing linear memory via a copy-on-write mapping can drastically
    /// improve instantiation costs of a WebAssembly module because copying
    /// memory is deferred. Additionally if a page of memory is only ever read
    /// from WebAssembly and never written to then the same underlying page of
    /// data will be reused between all instantiations of a module meaning that
    /// if a module is instantiated many times this can lower the overall memory
    /// needed to run that module.
    ///
    /// The main disadvantage of copy-on-write initialization, however, is that
    /// it may be possible for highly-parallel scenarios to be less scalable. If
    /// a page is read initially by a WebAssembly module then that page will be
    /// mapped to a read-only copy shared between all WebAssembly instances. If
    /// the same page is then written, however, then a private copy is created
    /// and swapped out from the read-only version. This also requires an [IPI],
    /// however, which can be a significant bottleneck in high-parallelism
    /// situations.
    ///
    /// This feature is only applicable when a WebAssembly module meets specific
    /// criteria to be initialized in this fashion, such as:
    ///
    /// * Only memories defined in the module can be initialized this way.
    /// * Data segments for memory must use statically known offsets.
    /// * Data segments for memory must all be in-bounds.
    ///
    /// Modules which do not meet these criteria will fall back to
    /// initialization of linear memory based on copying memory.
    ///
    /// This feature of Wasmtime is also platform-specific:
    ///
    /// * Linux - this feature is supported for all instances of [`Module`].
    ///   Modules backed by an existing mmap (such as those created by
    ///   [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
    ///   memory. Other instances of [`Module`] may use the `memfd_create`
    ///   syscall to create an initialization image to `mmap`.
    /// * Unix (not Linux) - this feature is only supported when loading modules
    ///   from a precompiled file via [`Module::deserialize_file`] where there
    ///   is a file descriptor to use to map data into the process. Note that
    ///   the module must have been compiled with this setting enabled as well.
    /// * Windows - there is no support for this feature at this time. Memory
    ///   initialization will always copy bytes.
    ///
    /// By default this option is enabled.
    ///
    /// [`Module::deserialize_file`]: crate::Module::deserialize_file
    /// [`Module`]: crate::Module
    /// [IPI]: https://en.wikipedia.org/wiki/Inter-processor_interrupt
    pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
        self.memory_init_cow = enable;
        self
    }
1657
1658    /// A configuration option to force the usage of `memfd_create` on Linux to
1659    /// be used as the backing source for a module's initial memory image.
1660    ///
1661    /// When [`Config::memory_init_cow`] is enabled, which is enabled by
1662    /// default, module memory initialization images are taken from a module's
1663    /// original mmap if possible. If a precompiled module was loaded from disk
1664    /// this means that the disk's file is used as an mmap source for the
1665    /// initial linear memory contents. This option can be used to force, on
1666    /// Linux, that instead of using the original file on disk a new in-memory
1667    /// file is created with `memfd_create` to hold the contents of the initial
1668    /// image.
1669    ///
1670    /// This option can be used to avoid possibly loading the contents of memory
1671    /// from disk through a page fault. Instead with `memfd_create` the contents
1672    /// of memory are always in RAM, meaning that even page faults which
1673    /// initially populate a wasm linear memory will only work with RAM instead
1674    /// of ever hitting the disk that the original precompiled module is stored
1675    /// on.
1676    ///
1677    /// This option is disabled by default.
1678    pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
1679        self.force_memory_init_memfd = enable;
1680        self
1681    }
1682
1683    /// Configures whether or not a coredump should be generated and attached to
1684    /// the anyhow::Error when a trap is raised.
1685    ///
1686    /// This option is disabled by default.
1687    #[cfg(feature = "coredump")]
1688    pub fn coredump_on_trap(&mut self, enable: bool) -> &mut Self {
1689        self.coredump_on_trap = enable;
1690        self
1691    }
1692
1693    /// Enables memory error checking for wasm programs.
1694    ///
1695    /// This option is disabled by default.
1696    #[cfg(any(feature = "cranelift", feature = "winch"))]
1697    pub fn wmemcheck(&mut self, enable: bool) -> &mut Self {
1698        self.wmemcheck = enable;
1699        self.compiler_config.wmemcheck = enable;
1700        self
1701    }
1702
1703    /// Configures the "guaranteed dense image size" for copy-on-write
1704    /// initialized memories.
1705    ///
1706    /// When using the [`Config::memory_init_cow`] feature to initialize memory
1707    /// efficiently (which is enabled by default), compiled modules contain an
1708    /// image of the module's initial heap. If the module has a fairly sparse
1709    /// initial heap, with just a few data segments at very different offsets,
1710    /// this could result in a large region of zero bytes in the image. In
1711    /// other words, it's not very memory-efficient.
1712    ///
1713    /// We normally use a heuristic to avoid this: if less than half
1714    /// of the initialized range (first non-zero to last non-zero
1715    /// byte) of any memory in the module has pages with nonzero
1716    /// bytes, then we avoid creating a memory image for the entire module.
1717    ///
1718    /// However, if the embedder always needs the instantiation-time efficiency
1719    /// of copy-on-write initialization, and is otherwise carefully controlling
1720    /// parameters of the modules (for example, by limiting the maximum heap
1721    /// size of the modules), then it may be desirable to ensure a memory image
1722    /// is created even if this could go against the heuristic above. Thus, we
1723    /// add another condition: there is a size of initialized data region up to
1724    /// which we *always* allow a memory image. The embedder can set this to a
1725    /// known maximum heap size if they desire to always get the benefits of
1726    /// copy-on-write images.
1727    ///
1728    /// In the future we may implement a "best of both worlds"
1729    /// solution where we have a dense image up to some limit, and
1730    /// then support a sparse list of initializers beyond that; this
1731    /// would get most of the benefit of copy-on-write and pay the incremental
1732    /// cost of eager initialization only for those bits of memory
1733    /// that are out-of-bounds. However, for now, an embedder desiring
1734    /// fast instantiation should ensure that this setting is as large
1735    /// as the maximum module initial memory content size.
1736    ///
1737    /// By default this value is 16 MiB.
1738    pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
1739        self.memory_guaranteed_dense_image_size = size_in_bytes;
1740        self
1741    }
1742
1743    /// Returns the set of features that the currently selected compiler backend
1744    /// does not support at all and may panic on.
1745    ///
1746    /// Wasmtime strives to reject unknown modules or unsupported modules with
1747    /// first-class errors instead of panics. Not all compiler backends have the
1748    /// same level of feature support on all platforms as well. This method
1749    /// returns a set of features that the currently selected compiler
1750    /// configuration is known to not support and may panic on. This acts as a
1751    /// first-level filter on incoming wasm modules/configuration to fail-fast
1752    /// instead of panicking later on.
1753    ///
1754    /// Note that if a feature is not listed here it does not mean that the
1755    /// backend fully supports the proposal. Instead that means that the backend
1756    /// doesn't ever panic on the proposal, but errors during compilation may
1757    /// still be returned. This means that features listed here are definitely
1758    /// not supported at all, but features not listed here may still be
1759    /// partially supported. For example at the time of this writing the Winch
1760    /// backend partially supports simd so it's not listed here. Winch doesn't
1761    /// fully support simd but unimplemented instructions just return errors.
1762    fn compiler_panicking_wasm_features(&self) -> WasmFeatures {
1763        #[cfg(any(feature = "cranelift", feature = "winch"))]
1764        match self.compiler_config.strategy {
1765            None | Some(Strategy::Cranelift) => WasmFeatures::empty(),
1766            Some(Strategy::Winch) => {
1767                let mut unsupported = WasmFeatures::GC
1768                    | WasmFeatures::FUNCTION_REFERENCES
1769                    | WasmFeatures::THREADS
1770                    | WasmFeatures::RELAXED_SIMD
1771                    | WasmFeatures::TAIL_CALL
1772                    | WasmFeatures::GC_TYPES;
1773                match self.compiler_target().architecture {
1774                    target_lexicon::Architecture::Aarch64(_) => {
1775                        // no support for simd on aarch64
1776                        unsupported |= WasmFeatures::SIMD;
1777
1778                        // things like multi-table are technically supported on
1779                        // winch on aarch64 but this helps gate most spec tests
1780                        // by default which otherwise currently cause panics.
1781                        unsupported |= WasmFeatures::REFERENCE_TYPES;
1782                    }
1783
1784                    // Winch doesn't support other non-x64 architectures at this
1785                    // time either but will return an first-class error for
1786                    // them.
1787                    _ => {}
1788                }
1789                unsupported
1790            }
1791            Some(Strategy::Auto) => unreachable!(),
1792        }
1793        #[cfg(not(any(feature = "cranelift", feature = "winch")))]
1794        return WasmFeatures::empty();
1795    }
1796
1797    /// Calculates the set of features that are enabled for this `Config`.
1798    ///
1799    /// This method internally will start with the an empty set of features to
1800    /// avoid being tied to wasmparser's defaults. Next Wasmtime's set of
1801    /// default features are added to this set, some of which are conditional
1802    /// depending on crate features. Finally explicitly requested features via
1803    /// `wasm_*` methods on `Config` are applied. Everything is then validated
1804    /// later in `Config::validate`.
1805    fn features(&self) -> WasmFeatures {
1806        // Wasmtime by default supports all of the wasm 2.0 version of the
1807        // specification.
1808        let mut features = WasmFeatures::WASM2;
1809
1810        // On-by-default features that wasmtime has. Note that these are all
1811        // subject to the criteria at
1812        // https://docs.wasmtime.dev/contributing-implementing-wasm-proposals.html
1813        features |= WasmFeatures::MULTI_MEMORY;
1814        features |= WasmFeatures::RELAXED_SIMD;
1815        features |= WasmFeatures::TAIL_CALL;
1816        features |= WasmFeatures::EXTENDED_CONST;
1817
1818        // Set some features to their conditionally-enabled defaults depending
1819        // on crate compile-time features.
1820        features.set(WasmFeatures::GC_TYPES, cfg!(feature = "gc"));
1821        features.set(WasmFeatures::THREADS, cfg!(feature = "threads"));
1822        features.set(
1823            WasmFeatures::COMPONENT_MODEL,
1824            cfg!(feature = "component-model"),
1825        );
1826
1827        // From the default set of proposals remove any that the current
1828        // compiler backend may panic on if the module contains them.
1829        features = features & !self.compiler_panicking_wasm_features();
1830
1831        // After wasmtime's defaults are configured then factor in user requests
1832        // and disable/enable features. Note that the enable/disable sets should
1833        // be disjoint.
1834        debug_assert!((self.enabled_features & self.disabled_features).is_empty());
1835        features &= !self.disabled_features;
1836        features |= self.enabled_features;
1837
1838        features
1839    }
1840
1841    fn compiler_target(&self) -> target_lexicon::Triple {
1842        #[cfg(any(feature = "cranelift", feature = "winch"))]
1843        {
1844            let host = target_lexicon::Triple::host();
1845
1846            self.compiler_config
1847                .target
1848                .as_ref()
1849                .unwrap_or(&host)
1850                .clone()
1851        }
1852        #[cfg(not(any(feature = "cranelift", feature = "winch")))]
1853        {
1854            target_lexicon::Triple::host()
1855        }
1856    }
1857
    /// Validates this configuration, returning the finalized [`Tunables`] and
    /// the effective [`WasmFeatures`] set on success, or an error describing
    /// why the configuration is inconsistent or unsupported.
    pub(crate) fn validate(&self) -> Result<(Tunables, WasmFeatures)> {
        let features = self.features();

        // First validate that the selected compiler backend and configuration
        // supports the set of `features` that are enabled. This will help
        // provide more first class errors instead of panics about unsupported
        // features and configurations.
        let unsupported = features & self.compiler_panicking_wasm_features();
        if !unsupported.is_empty() {
            // Report the first offending feature by name via its bitflags
            // metadata.
            for flag in WasmFeatures::FLAGS.iter() {
                if !unsupported.contains(*flag.value()) {
                    continue;
                }
                bail!(
                    "the wasm_{} feature is not supported on this compiler configuration",
                    flag.name().to_lowercase()
                );
            }

            panic!("should have returned an error by now")
        }

        // Enforce dependencies between proposals: each feature checked below
        // builds on another and cannot be enabled without it.
        if features.contains(WasmFeatures::REFERENCE_TYPES)
            && !features.contains(WasmFeatures::BULK_MEMORY)
        {
            bail!("feature 'reference_types' requires 'bulk_memory' to be enabled");
        }
        if features.contains(WasmFeatures::THREADS) && !features.contains(WasmFeatures::BULK_MEMORY)
        {
            bail!("feature 'threads' requires 'bulk_memory' to be enabled");
        }
        if features.contains(WasmFeatures::FUNCTION_REFERENCES)
            && !features.contains(WasmFeatures::REFERENCE_TYPES)
        {
            bail!("feature 'function_references' requires 'reference_types' to be enabled");
        }
        if features.contains(WasmFeatures::GC)
            && !features.contains(WasmFeatures::FUNCTION_REFERENCES)
        {
            bail!("feature 'gc' requires 'function_references' to be enabled");
        }
        // Stack-size configuration checks.
        #[cfg(feature = "async")]
        if self.async_support && self.max_wasm_stack > self.async_stack_size {
            bail!("max_wasm_stack size cannot exceed the async_stack_size");
        }
        if self.max_wasm_stack == 0 {
            bail!("max_wasm_stack size cannot be zero");
        }
        #[cfg(not(feature = "wmemcheck"))]
        if self.wmemcheck {
            bail!("wmemcheck (memory checker) was requested but is not enabled in this build");
        }

        // Start from default tunables: target-specific defaults when an
        // explicit compilation target is configured, host defaults otherwise.
        #[cfg(not(any(feature = "cranelift", feature = "winch")))]
        let mut tunables = Tunables::default_host();
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        let mut tunables = match &self.compiler_config.target.as_ref() {
            Some(target) => Tunables::default_for_target(target)?,
            None => Tunables::default_host(),
        };

        // Overlay every explicitly-configured `ConfigTunables` field (those
        // set to `Some`) onto the defaults computed above.
        macro_rules! set_fields {
            ($($field:ident)*) => (
                let ConfigTunables {
                    $($field,)*
                } = &self.tunables;

                $(
                    if let Some(e) = $field {
                        tunables.$field = *e;
                    }
                )*
            )
        }

        set_fields! {
            static_memory_reservation
            static_memory_offset_guard_size
            dynamic_memory_offset_guard_size
            dynamic_memory_growth_reserve
            generate_native_debuginfo
            parse_wasm_debuginfo
            consume_fuel
            epoch_interruption
            static_memory_bound_is_maximum
            guard_before_linear_memory
            table_lazy_init
            generate_address_map
            debug_adapter_modules
            relaxed_simd_deterministic
        }

        // If we're going to compile with winch, we must use the winch calling convention.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            tunables.winch_callable = self.compiler_config.strategy == Some(Strategy::Winch);

            if tunables.winch_callable && !tunables.table_lazy_init {
                bail!("Winch requires the table-lazy-init configuration option");
            }
        }

        // Guard-size invariant documented on `Config::dynamic_memory_guard_size`.
        if tunables.static_memory_offset_guard_size < tunables.dynamic_memory_offset_guard_size {
            bail!("static memory guard size cannot be smaller than dynamic memory guard size");
        }

        Ok((tunables, features))
    }
1966
    /// Constructs the instance allocator selected by this `Config`'s
    /// allocation strategy: on-demand or (when enabled) pooling.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_allocator(
        &self,
        tunables: &Tunables,
    ) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Fiber stacks only exist with async support compiled in; otherwise
        // the size is irrelevant and zero serves as a placeholder.
        #[cfg(feature = "async")]
        let stack_size = self.async_stack_size;

        #[cfg(not(feature = "async"))]
        let stack_size = 0;

        // Silence an unused-variable warning when the pooling allocator (the
        // only consumer of `tunables` below) is compiled out.
        let _ = tunables;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => {
                #[allow(unused_mut)]
                let mut allocator = Box::new(OnDemandInstanceAllocator::new(
                    self.mem_creator.clone(),
                    stack_size,
                ));
                // A custom stack creator, if configured, overrides the default
                // fiber stack allocation.
                #[cfg(feature = "async")]
                if let Some(stack_creator) = &self.stack_creator {
                    allocator.set_stack_creator(stack_creator.clone());
                }
                Ok(allocator)
            }
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // Copy the user's pooling configuration and splice in the
                // stack size computed above.
                let mut config = config.config;
                config.stack_size = stack_size;
                Ok(Box::new(crate::runtime::vm::PoolingInstanceAllocator::new(
                    &config, tunables,
                )?))
            }
        }
    }
2003
2004    #[cfg(feature = "runtime")]
2005    pub(crate) fn build_gc_runtime(&self) -> Result<Arc<dyn GcRuntime>> {
2006        Ok(Arc::new(crate::runtime::vm::default_gc_runtime()) as Arc<dyn GcRuntime>)
2007    }
2008
2009    #[cfg(feature = "runtime")]
2010    pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
2011        Ok(match self.profiling_strategy {
2012            ProfilingStrategy::PerfMap => profiling_agent::new_perfmap()?,
2013            ProfilingStrategy::JitDump => profiling_agent::new_jitdump()?,
2014            ProfilingStrategy::VTune => profiling_agent::new_vtune()?,
2015            ProfilingStrategy::None => profiling_agent::new_null(),
2016        })
2017    }
2018
    /// Builds the code-generation backend (Cranelift or Winch) described by
    /// `self.compiler_config`, consuming and returning `self` alongside the
    /// built compiler.
    ///
    /// Settings implied by `tunables` and `features` are folded into
    /// `compiler_config` before the compiler is finalized. Callers must have
    /// already resolved `Strategy::Auto` into a concrete strategy; hitting
    /// the `None | Some(Strategy::Auto)` arm below is a programmer error.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub(crate) fn build_compiler(
        mut self,
        tunables: &Tunables,
        features: WasmFeatures,
    ) -> Result<(Self, Box<dyn wasmtime_environ::Compiler>)> {
        let target = self.compiler_config.target.clone();

        // Select the backend builder; strategies whose feature isn't compiled
        // in turn into a runtime error rather than a compile error.
        let mut compiler = match self.compiler_config.strategy {
            #[cfg(feature = "cranelift")]
            Some(Strategy::Cranelift) => wasmtime_cranelift::builder(target)?,
            #[cfg(not(feature = "cranelift"))]
            Some(Strategy::Cranelift) => bail!("cranelift support not compiled in"),
            #[cfg(feature = "winch")]
            Some(Strategy::Winch) => wasmtime_winch::builder(target)?,
            #[cfg(not(feature = "winch"))]
            Some(Strategy::Winch) => bail!("winch support not compiled in"),

            None | Some(Strategy::Auto) => unreachable!(),
        };

        if let Some(path) = &self.compiler_config.clif_dir {
            compiler.clif_dir(path)?;
        }

        // NOTE: every setting/flag inserted below must happen before the
        // "apply compiler settings and flags" loops near the end — the loops
        // are what actually push these values into the backend.

        // If probestack is enabled for a target, Wasmtime will always use the
        // inline strategy which doesn't require us to define a `__probestack`
        // function or similar.
        self.compiler_config
            .settings
            .insert("probestack_strategy".into(), "inline".into());

        let target = self.compiler_target();

        // On supported targets, we enable stack probing by default.
        // This is required on Windows because of the way Windows
        // commits its stacks, but it's also a good idea on other
        // platforms to ensure guard pages are hit for large frame
        // sizes.
        if probestack_supported(target.architecture) {
            self.compiler_config
                .flags
                .insert("enable_probestack".into());
        }

        // An explicit user request for unwind info must not conflict with a
        // previously-set Cranelift `unwind_info` setting.
        if let Some(unwind_requested) = self.native_unwind_info {
            if !self
                .compiler_config
                .ensure_setting_unset_or_given("unwind_info", &unwind_requested.to_string())
            {
                bail!("incompatible settings requested for Cranelift and Wasmtime `unwind-info` settings");
            }
        }

        // Windows requires unwind info unconditionally.
        if target.operating_system == target_lexicon::OperatingSystem::Windows {
            if !self
                .compiler_config
                .ensure_setting_unset_or_given("unwind_info", "true")
            {
                bail!("`native_unwind_info` cannot be disabled on Windows");
            }
        }

        // We require frame pointers for correct stack walking, which is safety
        // critical in the presence of reference types, and otherwise it is just
        // really bad developer experience to get wrong.
        self.compiler_config
            .settings
            .insert("preserve_frame_pointers".into(), "true".into());

        // check for incompatible compiler options and set required values
        if features.contains(WasmFeatures::REFERENCE_TYPES) {
            if !self
                .compiler_config
                .ensure_setting_unset_or_given("enable_safepoints", "true")
            {
                bail!("compiler option 'enable_safepoints' must be enabled when 'reference types' is enabled");
            }
        }

        if features.contains(WasmFeatures::RELAXED_SIMD) && !features.contains(WasmFeatures::SIMD) {
            bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
        }

        // Apply compiler settings and flags
        for (k, v) in self.compiler_config.settings.iter() {
            compiler.set(k, v)?;
        }
        for flag in self.compiler_config.flags.iter() {
            compiler.enable(flag)?;
        }

        #[cfg(feature = "incremental-cache")]
        if let Some(cache_store) = &self.compiler_config.cache_store {
            compiler.enable_incremental_compilation(cache_store.clone())?;
        }

        compiler.set_tunables(tunables.clone())?;
        compiler.wmemcheck(self.compiler_config.wmemcheck);

        Ok((self, compiler.build()?))
    }
2121
    /// Internal setting for whether adapter modules for components will have
    /// extra WebAssembly instructions inserted performing more debug checks
    /// than are necessary.
    #[cfg(feature = "component-model")]
    pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
        self.tunables.debug_adapter_modules = Some(debug);
        self
    }
2130
2131    /// Enables clif output when compiling a WebAssembly module.
2132    #[cfg(any(feature = "cranelift", feature = "winch"))]
2133    pub fn emit_clif(&mut self, path: &Path) -> &mut Self {
2134        self.compiler_config.clif_dir = Some(path.to_path_buf());
2135        self
2136    }
2137
    /// Configures whether, when on macOS, Mach ports are used for exception
    /// handling instead of traditional Unix-based signal handling.
    ///
    /// WebAssembly traps in Wasmtime are implemented with native faults, for
    /// example a `SIGSEGV` will occur when a WebAssembly guest accesses
    /// out-of-bounds memory. Handling this can be configured to either use Unix
    /// signals or Mach ports on macOS. By default Mach ports are used.
    ///
    /// Mach ports enable Wasmtime to work by default with foreign
    /// error-handling systems such as breakpad which also use Mach ports to
    /// handle signals. In this situation Wasmtime will continue to handle guest
    /// faults gracefully while any non-guest faults will get forwarded to
    /// process-level handlers such as breakpad. Some more background on this
    /// can be found in [#2456].
    ///
    /// A downside of using mach ports, however, is that they don't interact
    /// well with `fork()`. Forking a Wasmtime process on macOS will produce a
    /// child process that cannot successfully run WebAssembly. In this
    /// situation traditional Unix signal handling should be used as that's
    /// inherited and works across forks.
    ///
    /// If your embedding wants to use a custom error handler which leverages
    /// Mach ports and you additionally wish to `fork()` the process and use
    /// Wasmtime in the child process that's not currently possible. Please
    /// reach out to us if you're in this bucket!
    ///
    /// This option defaults to `true`, using Mach ports by default.
    ///
    /// [#2456]: https://github.com/bytecodealliance/wasmtime/issues/2456
    pub fn macos_use_mach_ports(&mut self, mach_ports: bool) -> &mut Self {
        self.macos_use_mach_ports = mach_ports;
        self
    }
2169
    /// Configures an embedder-provided function, `detect`, which is used to
    /// determine if an ISA-specific feature is available on the current host.
    ///
    /// This function is used to verify that any features enabled for a compiler
    /// backend, such as AVX support on x86\_64, are also available on the host.
    /// It is undefined behavior to execute an AVX instruction on a host that
    /// doesn't support AVX instructions, for example.
    ///
    /// When the `std` feature is active on this crate then this function is
    /// configured to a default implementation that uses the standard library's
    /// feature detection. When the `std` feature is disabled then there is no
    /// default available and this method must be called to configure a feature
    /// probing function.
    ///
    /// The `detect` function provided is given a string name of an ISA feature.
    /// The function should then return:
    ///
    /// * `Some(true)` - indicates that the feature was found on the host and it
    ///   is supported.
    /// * `Some(false)` - the feature name was recognized but it was not
    ///   detected on the host, for example the CPU is too old.
    /// * `None` - the feature name was not recognized and it's not known
    ///   whether it's on the host or not.
    ///
    /// Feature names passed to `detect` match the same feature name used in the
    /// Rust standard library. For example `"sse4.2"` is used on x86\_64.
    ///
    /// # Safety
    ///
    /// This function is `unsafe` because it is undefined behavior to execute
    /// instructions that a host does not support. This means that the result of
    /// `detect` must be correct for memory safe execution at runtime.
    pub unsafe fn detect_host_feature(&mut self, detect: fn(&str) -> Option<bool>) -> &mut Self {
        self.detect_host_feature = Some(detect);
        self
    }
2206}
2207
2208impl Default for Config {
2209    fn default() -> Config {
2210        Config::new()
2211    }
2212}
2213
2214impl fmt::Debug for Config {
2215    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2216        let mut f = f.debug_struct("Config");
2217        f.field("debug_info", &self.tunables.generate_native_debuginfo);
2218
2219        // Not every flag in WasmFeatures can be enabled as part of creating
2220        // a Config. This impl gives a complete picture of all WasmFeatures
2221        // enabled, and doesn't require maintenance by hand (which has become out
2222        // of date in the past), at the cost of possible confusion for why
2223        // a flag in this set doesn't have a Config setter.
2224        let features = self.features();
2225        for flag in WasmFeatures::FLAGS.iter() {
2226            f.field(
2227                &format!("wasm_{}", flag.name().to_lowercase()),
2228                &features.contains(*flag.value()),
2229            );
2230        }
2231
2232        f.field("parallel_compilation", &self.parallel_compilation);
2233        #[cfg(any(feature = "cranelift", feature = "winch"))]
2234        {
2235            f.field("compiler_config", &self.compiler_config);
2236        }
2237
2238        if let Some(enable) = self.tunables.parse_wasm_debuginfo {
2239            f.field("parse_wasm_debuginfo", &enable);
2240        }
2241        if let Some(size) = self.tunables.static_memory_reservation {
2242            f.field("static_memory_maximum_reservation", &size);
2243        }
2244        if let Some(size) = self.tunables.static_memory_offset_guard_size {
2245            f.field("static_memory_guard_size", &size);
2246        }
2247        if let Some(size) = self.tunables.dynamic_memory_offset_guard_size {
2248            f.field("dynamic_memory_guard_size", &size);
2249        }
2250        if let Some(enable) = self.tunables.guard_before_linear_memory {
2251            f.field("guard_before_linear_memory", &enable);
2252        }
2253        f.finish()
2254    }
2255}
2256
/// Possible compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// code generator for a wasm module is.
    ///
    /// Currently this always defaults to Cranelift, but the default value may
    /// change over time.
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,

    /// A baseline compiler for WebAssembly, currently under active development
    /// and not ready for production applications.
    Winch,
}
2282
2283impl Strategy {
2284    fn not_auto(&self) -> Option<Strategy> {
2285        match self {
2286            Strategy::Auto => {
2287                if cfg!(feature = "cranelift") {
2288                    Some(Strategy::Cranelift)
2289                } else if cfg!(feature = "winch") {
2290                    Some(Strategy::Winch)
2291                } else {
2292                    None
2293                }
2294            }
2295            other => Some(*other),
2296        }
2297    }
2298}
2299
/// Possible optimization levels for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub enum OptLevel {
    /// No optimizations performed; minimizes compilation time by disabling
    /// most optimizations.
    None,
    /// Generates the fastest possible code, but may take longer.
    Speed,
    /// Similar to `Speed`, but also performs transformations aimed at reducing
    /// code size.
    SpeedAndSize,
}
2313
/// Select which profiling technique to support.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProfilingStrategy {
    /// No profiler support.
    None,

    /// Collect function name information as the "perf map" file format, used
    /// with `perf` on Linux.
    PerfMap,

    /// Collect profiling info for the "jitdump" file format, used with `perf`
    /// on Linux.
    JitDump,

    /// Collect profiling info using "ittapi", used with `VTune` on Linux.
    VTune,
}
2330
/// Select how wasm backtrace detailed information is handled.
#[derive(Debug, Clone, Copy)]
pub enum WasmBacktraceDetails {
    /// Support is unconditionally enabled and wasmtime will parse and read
    /// debug information.
    Enable,

    /// Support is disabled, and wasmtime will not parse debug information for
    /// backtrace details.
    Disable,

    /// Support for backtrace details is conditional on the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
2346
/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
/// change the behavior of the pooling instance allocator.
///
/// This structure has a builder-style API in the same manner as [`Config`] and
/// is configured with [`Config::allocation_strategy`].
///
/// Note that usage of the pooling allocator does not affect compiled
/// WebAssembly code. Compiled `*.cwasm` files, for example, are usable both
/// with and without the pooling allocator.
///
/// ## Advantages of Pooled Allocation
///
/// The main benefit of the pooling allocator is to make WebAssembly
/// instantiation both faster and more scalable in terms of parallelism.
/// Allocation is faster because virtual memory is already configured and ready
/// to go within the pool, there's no need to [`mmap`] (for example on Unix) a
/// new region and configure it with guard pages. By avoiding [`mmap`] this
/// avoids whole-process virtual memory locks, which can improve scalability
/// and performance.
///
/// Additionally with pooled allocation it's possible to create "affine slots"
/// to a particular WebAssembly module or component over time. For example if
/// the same module is instantiated multiple times over time the pooling
/// allocator will, by default, attempt to reuse the same slot. This means that
/// the slot has been pre-configured and can retain virtual memory mappings for
/// a copy-on-write image, for example (see [`Config::memory_init_cow`] for
/// more information).
/// This means that in a steady state instance deallocation is a single
/// [`madvise`] to reset linear memory to its original contents followed by a
/// single (optional) [`mprotect`] during the next instantiation to shrink
/// memory back to its original size. Compared to non-pooled allocation this
/// avoids the need to [`mmap`] a new region of memory, [`munmap`] it, and
/// [`mprotect`] regions too.
///
/// Another benefit of pooled allocation is that it's possible to configure
/// things such that no virtual memory management is required at all in a steady
/// state. For example a pooling allocator can be configured with
/// [`Config::memory_init_cow`] disabled, dynamic bounds checks enabled
/// through
/// [`Config::static_memory_maximum_size(0)`](Config::static_memory_maximum_size),
/// and sufficient space through
/// [`PoolingAllocationConfig::table_keep_resident`] /
/// [`PoolingAllocationConfig::linear_memory_keep_resident`]. With all these
/// options in place no virtual memory tricks are used at all and everything is
/// manually managed by Wasmtime (for example resetting memory is a
/// `memset(0)`). This is not as fast in a single-threaded scenario but can
/// provide benefits in high-parallelism situations as no virtual memory locks
/// or IPIs need to happen.
///
/// ## Disadvantages of Pooled Allocation
///
/// Despite the above advantages to instantiation performance the pooling
/// allocator is not enabled by default in Wasmtime. One reason is that the
/// performance advantages are not necessarily portable, for example while the
/// pooling allocator works on Windows it has not been tuned for performance on
/// Windows in the same way it has on Linux.
///
/// Additionally the main cost of the pooling allocator is that it requires a
/// very large reservation of virtual memory (on the order of most of the
/// addressable virtual address space). WebAssembly 32-bit linear memories in
/// Wasmtime are, by default 4G address space reservations with a 2G guard
/// region both before and after the linear memory. Memories in the pooling
/// allocator are contiguous which means that we only need a guard after linear
/// memory because the previous linear memory's slot post-guard is our own
/// pre-guard. This means that, by default, the pooling allocator uses 6G of
/// virtual memory per WebAssembly linear memory slot. 6G of virtual memory is
/// 32.5 bits of a 64-bit address. Many 64-bit systems can only actually use
/// 48-bit addresses by default (although this can be extended on architectures
/// nowadays too), and of those 48 bits one of them is reserved to indicate
/// kernel-vs-userspace. This leaves 47-32.5=14.5 bits left, meaning you can
/// only have at most 64k slots of linear memories on many systems by default.
/// This is a relatively small number and shows how the pooling allocator can
/// quickly exhaust all of virtual memory.
///
/// Another disadvantage of the pooling allocator is that it may keep memory
/// alive when nothing is using it. A previously used slot for an instance might
/// have paged-in memory that will not get paged out until the
/// [`Engine`](crate::Engine) owning the pooling allocator is dropped. While
/// suitable for some applications this behavior may not be suitable for all
/// applications.
///
/// Finally the last disadvantage of the pooling allocator is that the
/// configuration values for the maximum number of instances, memories, tables,
/// etc, must all be fixed up-front. There's not always a clear answer as to
/// what these values should be so not all applications may be able to work
/// with this constraint.
///
/// [`madvise`]: https://man7.org/linux/man-pages/man2/madvise.2.html
/// [`mprotect`]: https://man7.org/linux/man-pages/man2/mprotect.2.html
/// [`mmap`]: https://man7.org/linux/man-pages/man2/mmap.2.html
/// [`munmap`]: https://man7.org/linux/man-pages/man2/munmap.2.html
#[cfg(feature = "pooling-allocator")]
#[derive(Debug, Clone, Default)]
pub struct PoolingAllocationConfig {
    /// Raw configuration handed through to the runtime's pooling allocator.
    config: crate::runtime::vm::PoolingInstanceAllocatorConfig,
}
2442
2443#[cfg(feature = "pooling-allocator")]
2444impl PoolingAllocationConfig {
    /// Configures the maximum number of "unused warm slots" to retain in the
    /// pooling allocator.
    ///
    /// The pooling allocator operates over slots to allocate from, and each
    /// slot is considered "cold" if it's never been used before or "warm" if
    /// it's been used by some module in the past. Slots in the pooling
    /// allocator additionally track an "affinity" flag to a particular core
    /// wasm module. When a module is instantiated into a slot then the slot is
    /// considered affine to that module, even after the instance has been
    /// deallocated.
    ///
    /// When a new instance is created then a slot must be chosen, and the
    /// current algorithm for selecting a slot is:
    ///
    /// * If there are slots that are affine to the module being instantiated,
    ///   then the most recently used slot is selected to be allocated from.
    ///   This is done to improve reuse of resources such as memory mappings and
    ///   additionally try to benefit from temporal locality for things like
    ///   caches.
    ///
    /// * Otherwise if there are more than N affine slots to other modules, then
    ///   one of those affine slots is chosen to be allocated. The slot chosen
    ///   is picked on a least-recently-used basis.
    ///
    /// * Finally, if there are fewer than N affine slots to other modules, then
    ///   the non-affine slots are allocated from.
    ///
    /// This setting, `max_unused_warm_slots`, is the value for N in the above
    /// algorithm. The purpose of this setting is to have a knob over the RSS
    /// impact of "unused slots" for a long-running wasm server.
    ///
    /// If this setting is set to 0, for example, then affine slots are
    /// aggressively reused on a least-recently-used basis. A "cold" slot is
    /// only used if there are no affine slots available to allocate from. This
    /// means that the set of slots used over the lifetime of a program is the
    /// same as the maximum concurrent number of wasm instances.
    ///
    /// If this setting is set to infinity, however, then cold slots are
    /// prioritized to be allocated from. This means that the set of slots used
    /// over the lifetime of a program will approach
    /// [`PoolingAllocationConfig::total_memories`], or the maximum number of
    /// slots in the pooling allocator.
    ///
    /// Wasmtime does not aggressively decommit all resources associated with a
    /// slot when the slot is not in use. For example the
    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
    /// used to keep memory associated with a slot, even when it's not in use.
    /// This means that the total set of used slots in the pooling instance
    /// allocator can impact the overall RSS usage of a program.
    ///
    /// The default value for this option is `100`.
    pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
        self.config.max_unused_warm_slots = max;
        self
    }
2500
    /// The target number of decommits to do per batch.
    ///
    /// This is not a precise limit: decommits can be queued up at times when
    /// the pool isn't prepared to immediately flush them, so the batch may
    /// occasionally exceed this target size.
    ///
    /// A batch size of one effectively disables batching.
    ///
    /// Defaults to `1`.
    pub fn decommit_batch_size(&mut self, batch_size: usize) -> &mut Self {
        self.config.decommit_batch_size = batch_size;
        self
    }
2514
    /// Configures whether or not stacks used for async futures are reset to
    /// zero after usage.
    ///
    /// When the [`async_support`](Config::async_support) method is enabled for
    /// Wasmtime and the [`call_async`] variant of calling WebAssembly is used
    /// then Wasmtime will create a separate runtime execution stack for each
    /// future produced by [`call_async`]. During the deallocation process
    /// Wasmtime won't, by default, reset the contents of the stack back to
    /// zero.
    ///
    /// When this option is enabled it can be seen as a defense-in-depth
    /// mechanism to reset a stack back to zero. This is not required for
    /// correctness and can be a costly operation in highly concurrent
    /// environments due to modifications of the virtual address space requiring
    /// process-wide synchronization.
    ///
    /// This option defaults to `false`.
    ///
    /// [`call_async`]: crate::TypedFunc::call_async
    #[cfg(feature = "async")]
    pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
        self.config.async_stack_zeroing = enable;
        self
    }
2539
    /// How much memory, in bytes, to keep resident for async stacks allocated
    /// with the pooling allocator.
    ///
    /// When [`PoolingAllocationConfig::async_stack_zeroing`] is enabled then
    /// Wasmtime will reset the contents of async stacks back to zero upon
    /// deallocation. This option can be used to perform the zeroing operation
    /// with `memset` up to a certain threshold of bytes instead of using system
    /// calls to reset the stack to zero.
    ///
    /// Note that when using this option the memory holding async stacks will
    /// never be decommitted.
    #[cfg(feature = "async")]
    pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
        self.config.async_stack_keep_resident = size;
        self
    }
2556
    /// How much memory, in bytes, to keep resident for each linear memory
    /// after deallocation.
    ///
    /// This option is only applicable on Linux and has no effect on other
    /// platforms.
    ///
    /// By default Wasmtime will use `madvise` to reset the entire contents of
    /// linear memory back to zero when a linear memory is deallocated. This
    /// option can be used to use `memset` instead to set memory back to zero
    /// which can, in some configurations, reduce the number of page faults
    /// taken when a slot is reused.
    pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
        self.config.linear_memory_keep_resident = size;
        self
    }
2572
    /// How much memory, in bytes, to keep resident for each table after
    /// deallocation.
    ///
    /// This option is only applicable on Linux and has no effect on other
    /// platforms.
    ///
    /// This option is the same as
    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
    /// applies to tables instead of linear memories.
    pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
        self.config.table_keep_resident = size;
        self
    }
2586
    /// The maximum number of concurrent component instances supported (default
    /// is `1000`).
    ///
    /// This provides an upper-bound on the total size of component
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::max_component_instance_size`]. The upper
    /// bound is
    ///
    /// ```text
    /// total_component_instances * max_component_instance_size
    /// ```
    ///
    /// where `max_component_instance_size` is rounded up to the size and alignment
    /// of the internal representation of the metadata.
    pub fn total_component_instances(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_component_instances = count;
        self
    }
2604
    /// The maximum size, in bytes, allocated for a component instance's
    /// `VMComponentContext` metadata.
    ///
    /// The [`wasmtime::component::Instance`][crate::component::Instance] type
    /// has a static size but its internal `VMComponentContext` is dynamically
    /// sized depending on the component being instantiated. This size limit
    /// loosely correlates to the size of the component, taking into account
    /// factors such as:
    ///
    /// * number of lifted and lowered functions,
    /// * number of memories
    /// * number of inner instances
    /// * number of resources
    ///
    /// If the allocated size per instance is too small then instantiation of a
    /// module will fail at runtime with an error indicating how many bytes were
    /// needed.
    ///
    /// The default value for this is 1 MiB.
    ///
    /// This provides an upper-bound on the total size of component
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::total_component_instances`]. The upper bound is
    ///
    /// ```text
    /// total_component_instances * max_component_instance_size
    /// ```
    ///
    /// where `max_component_instance_size` is rounded up to the size and alignment
    /// of the internal representation of the metadata.
    pub fn max_component_instance_size(&mut self, size: usize) -> &mut Self {
        self.config.limits.component_instance_size = size;
        self
    }
2639
    /// The maximum number of core instances a single component may contain
    /// (default is `20`).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_memories_per_component`],
    /// [`PoolingAllocationConfig::max_tables_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component would instantiate more core instances than `count`, then
    /// the component will fail to instantiate.
    pub fn max_core_instances_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_core_instances_per_component = count;
        self
    }
2655
2656    /// The maximum number of Wasm linear memories that a single component may
2657    /// transitively contain (default is `20`).
2658    ///
2659    /// This method (along with
2660    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
2661    /// [`PoolingAllocationConfig::max_tables_per_component`], and
2662    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
2663    /// the amount of resources a single component allocation consumes.
2664    ///
2665    /// If a component transitively contains more linear memories than `count`,
2666    /// then the component will fail to instantiate.
2667    pub fn max_memories_per_component(&mut self, count: u32) -> &mut Self {
2668        self.config.limits.max_memories_per_component = count;
2669        self
2670    }
2671
    /// The maximum number of tables that a single component may transitively
    /// contain (default is `20`).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_memories_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component transitively contains more tables than `count`, then
    /// the component will fail to instantiate.
    pub fn max_tables_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_tables_per_component = count;
        self
    }
2687
2688    /// The maximum number of concurrent Wasm linear memories supported (default
2689    /// is `1000`).
2690    ///
2691    /// This value has a direct impact on the amount of memory allocated by the pooling
2692    /// instance allocator.
2693    ///
2694    /// The pooling instance allocator allocates a memory pool, where each entry
2695    /// in the pool contains the reserved address space for each linear memory
2696    /// supported by an instance.
2697    ///
2698    /// The memory pool will reserve a large quantity of host process address
2699    /// space to elide the bounds checks required for correct WebAssembly memory
2700    /// semantics. Even with 64-bit address spaces, the address space is limited
2701    /// when dealing with a large number of linear memories.
2702    ///
2703    /// For example, on Linux x86_64, the userland address space limit is 128
2704    /// TiB. That might seem like a lot, but each linear memory will *reserve* 6
2705    /// GiB of space by default.
2706    pub fn total_memories(&mut self, count: u32) -> &mut Self {
2707        self.config.limits.total_memories = count;
2708        self
2709    }
2710
2711    /// The maximum number of concurrent tables supported (default is `1000`).
2712    ///
2713    /// This value has a direct impact on the amount of memory allocated by the
2714    /// pooling instance allocator.
2715    ///
2716    /// The pooling instance allocator allocates a table pool, where each entry
2717    /// in the pool contains the space needed for each WebAssembly table
2718    /// supported by an instance (see `table_elements` to control the size of
2719    /// each table).
2720    pub fn total_tables(&mut self, count: u32) -> &mut Self {
2721        self.config.limits.total_tables = count;
2722        self
2723    }
2724
2725    /// The maximum number of execution stacks allowed for asynchronous
2726    /// execution, when enabled (default is `1000`).
2727    ///
2728    /// This value has a direct impact on the amount of memory allocated by the
2729    /// pooling instance allocator.
2730    #[cfg(feature = "async")]
2731    pub fn total_stacks(&mut self, count: u32) -> &mut Self {
2732        self.config.limits.total_stacks = count;
2733        self
2734    }
2735
2736    /// The maximum number of concurrent core instances supported (default is
2737    /// `1000`).
2738    ///
2739    /// This provides an upper-bound on the total size of core instance
2740    /// metadata-related allocations, along with
2741    /// [`PoolingAllocationConfig::max_core_instance_size`]. The upper bound is
2742    ///
2743    /// ```text
2744    /// total_core_instances * max_core_instance_size
2745    /// ```
2746    ///
2747    /// where `max_core_instance_size` is rounded up to the size and alignment of
2748    /// the internal representation of the metadata.
2749    pub fn total_core_instances(&mut self, count: u32) -> &mut Self {
2750        self.config.limits.total_core_instances = count;
2751        self
2752    }
2753
2754    /// The maximum size, in bytes, allocated for a core instance's `VMContext`
2755    /// metadata.
2756    ///
2757    /// The [`Instance`][crate::Instance] type has a static size but its
2758    /// `VMContext` metadata is dynamically sized depending on the module being
2759    /// instantiated. This size limit loosely correlates to the size of the Wasm
2760    /// module, taking into account factors such as:
2761    ///
2762    /// * number of functions
2763    /// * number of globals
2764    /// * number of memories
2765    /// * number of tables
2766    /// * number of function types
2767    ///
2768    /// If the allocated size per instance is too small then instantiation of a
2769    /// module will fail at runtime with an error indicating how many bytes were
2770    /// needed.
2771    ///
2772    /// The default value for this is 1MiB.
2773    ///
2774    /// This provides an upper-bound on the total size of core instance
2775    /// metadata-related allocations, along with
2776    /// [`PoolingAllocationConfig::total_core_instances`]. The upper bound is
2777    ///
2778    /// ```text
2779    /// total_core_instances * max_core_instance_size
2780    /// ```
2781    ///
2782    /// where `max_core_instance_size` is rounded up to the size and alignment of
2783    /// the internal representation of the metadata.
2784    pub fn max_core_instance_size(&mut self, size: usize) -> &mut Self {
2785        self.config.limits.core_instance_size = size;
2786        self
2787    }
2788
2789    /// The maximum number of defined tables for a core module (default is `1`).
2790    ///
2791    /// This value controls the capacity of the `VMTableDefinition` table in
2792    /// each instance's `VMContext` structure.
2793    ///
2794    /// The allocated size of the table will be `tables *
2795    /// sizeof(VMTableDefinition)` for each instance regardless of how many
2796    /// tables are defined by an instance's module.
2797    pub fn max_tables_per_module(&mut self, tables: u32) -> &mut Self {
2798        self.config.limits.max_tables_per_module = tables;
2799        self
2800    }
2801
2802    /// The maximum table elements for any table defined in a module (default is
2803    /// `20000`).
2804    ///
2805    /// If a table's minimum element limit is greater than this value, the
2806    /// module will fail to instantiate.
2807    ///
2808    /// If a table's maximum element limit is unbounded or greater than this
2809    /// value, the maximum will be `table_elements` for the purpose of any
2810    /// `table.grow` instruction.
2811    ///
2812    /// This value is used to reserve the maximum space for each supported
2813    /// table; table elements are pointer-sized in the Wasmtime runtime.
2814    /// Therefore, the space reserved for each instance is `tables *
2815    /// table_elements * sizeof::<*const ()>`.
2816    pub fn table_elements(&mut self, elements: u32) -> &mut Self {
2817        self.config.limits.table_elements = elements;
2818        self
2819    }
2820
2821    /// The maximum number of defined linear memories for a module (default is
2822    /// `1`).
2823    ///
2824    /// This value controls the capacity of the `VMMemoryDefinition` table in
2825    /// each core instance's `VMContext` structure.
2826    ///
2827    /// The allocated size of the table will be `memories *
2828    /// sizeof(VMMemoryDefinition)` for each core instance regardless of how
2829    /// many memories are defined by the core instance's module.
2830    pub fn max_memories_per_module(&mut self, memories: u32) -> &mut Self {
2831        self.config.limits.max_memories_per_module = memories;
2832        self
2833    }
2834
2835    /// The maximum byte size that any WebAssembly linear memory may grow to.
2836    ///
2837    /// This option defaults to 4 GiB meaning that for 32-bit linear memories
2838    /// there is no restrictions. 64-bit linear memories will not be allowed to
2839    /// grow beyond 4 GiB by default.
2840    ///
2841    /// If a memory's minimum size is greater than this value, the module will
2842    /// fail to instantiate.
2843    ///
2844    /// If a memory's maximum size is unbounded or greater than this value, the
2845    /// maximum will be `max_memory_size` for the purpose of any `memory.grow`
2846    /// instruction.
2847    ///
2848    /// This value is used to control the maximum accessible space for each
2849    /// linear memory of a core instance. This can be thought of as a simple
2850    /// mechanism like [`Store::limiter`](crate::Store::limiter) to limit memory
2851    /// at runtime. This value can also affect striping/coloring behavior when
2852    /// used in conjunction with
2853    /// [`memory_protection_keys`](PoolingAllocationConfig::memory_protection_keys).
2854    ///
2855    /// The virtual memory reservation size of each linear memory is controlled
2856    /// by the [`Config::static_memory_maximum_size`] setting and this method's
2857    /// configuration cannot exceed [`Config::static_memory_maximum_size`].
2858    pub fn max_memory_size(&mut self, bytes: usize) -> &mut Self {
2859        self.config.limits.max_memory_size = bytes;
2860        self
2861    }
2862
    /// Configures whether memory protection keys (MPK) should be used for more
    /// efficient layout of pool-allocated memories.
    ///
    /// When using the pooling allocator (see [`Config::allocation_strategy`],
    /// [`InstanceAllocationStrategy::Pooling`]), memory protection keys can
    /// reduce the total amount of allocated virtual memory by eliminating guard
    /// regions between WebAssembly memories in the pool. It does so by
    /// "coloring" memory regions with different memory keys and setting which
    /// regions are accessible each time execution switches from host to guest
    /// (or vice versa).
    ///
    /// Leveraging MPK requires configuring a smaller-than-default
    /// [`max_memory_size`](PoolingAllocationConfig::max_memory_size) to enable
    /// this coloring/striping behavior. For example embeddings might want to
    /// reduce the default 4G allowance to 128M.
    ///
    /// MPK is only available on Linux (called `pku` there) and recent x86
    /// systems; we check for MPK support at runtime by examining the `CPUID`
    /// register. This configuration setting can be in three states:
    ///
    /// - `auto`: if MPK support is available the guard regions are removed; if
    ///   not, the guard regions remain
    /// - `enable`: use MPK to eliminate guard regions; fail if MPK is not
    ///   supported
    /// - `disable`: never use MPK
    ///
    /// By default this value is `disable`, but may become `auto` in future
    /// releases.
    ///
    /// __WARNING__: this configuration option is still experimental--use at
    /// your own risk! MPK uses kernel and CPU features to protect memory
    /// regions; you may observe segmentation faults if anything is
    /// misconfigured.
    #[cfg(feature = "memory-protection-keys")]
    pub fn memory_protection_keys(&mut self, enable: MpkEnabled) -> &mut Self {
        self.config.memory_protection_keys = enable;
        self
    }
2901
2902    /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime
2903    /// will use.
2904    ///
2905    /// This setting is only applicable when
2906    /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `enable`
2907    /// or `auto`. Configuring this above the HW and OS limits (typically 15)
2908    /// has no effect.
2909    ///
2910    /// If multiple Wasmtime engines are used in the same process, note that all
2911    /// engines will share the same set of allocated keys; this setting will
2912    /// limit how many keys are allocated initially and thus available to all
2913    /// other engines.
2914    #[cfg(feature = "memory-protection-keys")]
2915    pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
2916        self.config.max_memory_protection_keys = max;
2917        self
2918    }
2919
    /// Check if memory protection keys (MPK) are available on the current host.
    ///
    /// Returns `true` when the host supports MPK, determined using the same
    /// method that [`MpkEnabled::Auto`] does. See
    /// [`PoolingAllocationConfig::memory_protection_keys`] for more
    /// information.
    #[cfg(feature = "memory-protection-keys")]
    pub fn are_memory_protection_keys_available() -> bool {
        crate::runtime::vm::mpk::is_supported()
    }
2930
2931    /// The maximum number of concurrent GC heaps supported (default is `1000`).
2932    ///
2933    /// This value has a direct impact on the amount of memory allocated by the
2934    /// pooling instance allocator.
2935    ///
2936    /// The pooling instance allocator allocates a GC heap pool, where each
2937    /// entry in the pool contains the space needed for each GC heap used by a
2938    /// store.
2939    #[cfg(feature = "gc")]
2940    pub fn total_gc_heaps(&mut self, count: u32) -> &mut Self {
2941        self.config.limits.total_gc_heaps = count;
2942        self
2943    }
2944}
2945
/// Returns whether stack-probing ("probestack") support is implemented for
/// the given target architecture.
///
/// Currently only x86_64, aarch64, and riscv64 are supported.
pub(crate) fn probestack_supported(arch: Architecture) -> bool {
    matches!(
        arch,
        Architecture::X86_64 | Architecture::Aarch64(_) | Architecture::Riscv64(_)
    )
}
2952
/// Detects whether the named target CPU feature is available on the current
/// host.
///
/// Returns `Some(true)`/`Some(false)` when the feature's presence could be
/// determined, and `None` when the feature is not known to this detection
/// code (or the host architecture has no detection support here at all).
#[cfg(feature = "std")]
fn detect_host_feature(feature: &str) -> Option<bool> {
    #[cfg(target_arch = "aarch64")]
    {
        return match feature {
            "lse" => Some(std::arch::is_aarch64_feature_detected!("lse")),
            "paca" => Some(std::arch::is_aarch64_feature_detected!("paca")),
            "fp16" => Some(std::arch::is_aarch64_feature_detected!("fp16")),

            _ => None,
        };
    }

    // There is no is_s390x_feature_detected macro yet, so for now
    // we use getauxval from the libc crate directly.
    #[cfg(all(target_arch = "s390x", target_os = "linux"))]
    {
        let v = unsafe { libc::getauxval(libc::AT_HWCAP) };
        const HWCAP_S390X_VXRS_EXT2: libc::c_ulong = 32768;

        return match feature {
            // There is no separate HWCAP bit for mie2, so assume
            // that any machine with vxrs_ext2 also has mie2.
            "vxrs_ext2" | "mie2" => Some((v & HWCAP_S390X_VXRS_EXT2) != 0),

            _ => None,
        };
    }

    #[cfg(target_arch = "riscv64")]
    {
        return match feature {
            // `is_riscv64_feature_detected` is not yet stable, so we cannot
            // use it. For now lie and say all features are always found to
            // keep tests working.
            _ => Some(true),
        };
    }

    #[cfg(target_arch = "x86_64")]
    {
        return match feature {
            "sse3" => Some(std::is_x86_feature_detected!("sse3")),
            "ssse3" => Some(std::is_x86_feature_detected!("ssse3")),
            "sse4.1" => Some(std::is_x86_feature_detected!("sse4.1")),
            "sse4.2" => Some(std::is_x86_feature_detected!("sse4.2")),
            "popcnt" => Some(std::is_x86_feature_detected!("popcnt")),
            "avx" => Some(std::is_x86_feature_detected!("avx")),
            "avx2" => Some(std::is_x86_feature_detected!("avx2")),
            "fma" => Some(std::is_x86_feature_detected!("fma")),
            "bmi1" => Some(std::is_x86_feature_detected!("bmi1")),
            "bmi2" => Some(std::is_x86_feature_detected!("bmi2")),
            "avx512bitalg" => Some(std::is_x86_feature_detected!("avx512bitalg")),
            "avx512dq" => Some(std::is_x86_feature_detected!("avx512dq")),
            "avx512f" => Some(std::is_x86_feature_detected!("avx512f")),
            "avx512vl" => Some(std::is_x86_feature_detected!("avx512vl")),
            "avx512vbmi" => Some(std::is_x86_feature_detected!("avx512vbmi")),
            "lzcnt" => Some(std::is_x86_feature_detected!("lzcnt")),

            _ => None,
        };
    }

    // Fallback for architectures with no detection support above; `feature`
    // is intentionally unused on those targets.
    #[allow(unreachable_code)]
    {
        let _ = feature;
        return None;
    }
}
3021}