linera_wasmer_vm/trap/traphandlers.rs

1// This file contains code from external sources.
2// Attributions: https://github.com/wasmerio/wasmer/blob/main/docs/ATTRIBUTIONS.md
3
4//! WebAssembly trap handling, which is built on top of the lower-level
5//! signal-handling mechanisms.
6
7use crate::vmcontext::{VMFunctionContext, VMTrampoline};
8use crate::{Trap, VMFunctionBody};
9use backtrace::Backtrace;
10use core::ptr::{read, read_unaligned};
11use corosensei::stack::DefaultStack;
12use corosensei::trap::{CoroutineTrapHandler, TrapHandlerRegs};
13use corosensei::{CoroutineResult, ScopedCoroutine, Yielder};
14use scopeguard::defer;
15use std::any::Any;
16use std::cell::Cell;
17use std::error::Error;
18use std::io;
19use std::mem;
20#[cfg(unix)]
21use std::mem::MaybeUninit;
22use std::ptr::{self, NonNull};
23use std::sync::atomic::{compiler_fence, AtomicPtr, AtomicUsize, Ordering};
24use std::sync::Once;
25use wasmer_types::TrapCode;
26
27/// Configuration for the runtime VM
28/// Currently only the stack size is configurable
29pub struct VMConfig {
30    /// Optional stack size (in bytes) of the VM. Values lower than 8K will be rounded up to 8K.
31    pub wasm_stack_size: Option<usize>,
32}
33
34// TrapInformation can be stored in the "Undefined Instruction" itself.
35// On x86_64, 0xC? selects a "Register" for the Mod R/M part of "ud1" (so no other bytes follow).
36// On Arm64, the udf instruction allows a 16-bit immediate, so we use the same 0xC? values to store the trap info.
37static MAGIC: u8 = 0xc0;
38
39static DEFAULT_STACK_SIZE: AtomicUsize = AtomicUsize::new(1024 * 1024);
40
41// The current definition of `ucontext_t` in the `libc` crate is incorrect
42// on aarch64-apple-darwin, so it's defined here with a more accurate definition.
43#[repr(C)]
44#[cfg(all(target_arch = "aarch64", target_os = "macos"))]
45#[allow(non_camel_case_types)]
46struct ucontext_t {
47    uc_onstack: libc::c_int,
48    uc_sigmask: libc::sigset_t,
49    uc_stack: libc::stack_t,
50    uc_link: *mut libc::ucontext_t,
51    uc_mcsize: usize,
52    uc_mcontext: libc::mcontext_t,
53}
54
55// The `ucontext_t` type is missing from the `libc` crate
56// on aarch64-unknown-freebsd, so it's defined here.
57#[repr(C)]
58#[cfg(all(target_arch = "aarch64", target_os = "freebsd"))]
59#[allow(non_camel_case_types)]
60struct ucontext_t {
61    uc_sigmask: libc::sigset_t,
62    uc_mcontext: libc::mcontext_t,
63    uc_link: *mut ucontext_t,
64    uc_stack: libc::stack_t,
65    uc_flags: libc::c_int,
66    spare: [libc::c_int; 4],
67}
68
69#[cfg(all(
70    unix,
71    not(all(target_arch = "aarch64", target_os = "macos")),
72    not(all(target_arch = "aarch64", target_os = "freebsd"))
73))]
74use libc::ucontext_t;
75
76/// Sets the default stack size. The initial default is 1MB; values are clamped between 8K and 100MB.
77pub fn set_stack_size(size: usize) {
78    DEFAULT_STACK_SIZE.store(size.max(8 * 1024).min(100 * 1024 * 1024), Ordering::Relaxed);
79}
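
// A minimal sketch of the clamping behaviour above. This mutates process-global
// state, so it is illustrative rather than a definitive test of the crate.
#[cfg(test)]
mod default_stack_size_tests {
    use super::*;

    #[test]
    fn requests_below_8k_are_rounded_up() {
        set_stack_size(1024);
        assert_eq!(DEFAULT_STACK_SIZE.load(Ordering::Relaxed), 8 * 1024);
        // Restore the documented 1MB default so other code is unaffected.
        set_stack_size(1024 * 1024);
    }
}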
80
81cfg_if::cfg_if! {
82    if #[cfg(unix)] {
83        /// Function which may handle custom signals while processing traps.
84        pub type TrapHandlerFn<'a> = dyn Fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) -> bool + Send + Sync + 'a;
85    } else if #[cfg(target_os = "windows")] {
86        /// Function which may handle custom signals while processing traps.
87        pub type TrapHandlerFn<'a> = dyn Fn(*mut windows_sys::Win32::System::Diagnostics::Debug::EXCEPTION_POINTERS) -> bool + Send + Sync + 'a;
88    }
89}
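
// Illustrative only: on unix a custom handler has the shape sketched below.
// Returning `true` claims the signal; returning `false` lets the default Wasm
// trap handling (and any previously installed handler) run. How the pointer is
// threaded into `catch_traps` is up to the embedder; this is an assumed usage
// pattern, not an API guarantee.
//
//     let custom: Box<TrapHandlerFn<'static>> =
//         Box::new(|signum, _siginfo, _context| signum == libc::SIGPROF);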
90
91// Process an illegal-opcode trap to see if it carries a TrapCode payload
92unsafe fn process_illegal_op(addr: usize) -> Option<TrapCode> {
93    let mut val: Option<u8> = None;
94    if cfg!(target_arch = "x86_64") {
95        val = if read(addr as *mut u8) & 0xf0 == 0x40
96            && read((addr + 1) as *mut u8) == 0x0f
97            && read((addr + 2) as *mut u8) == 0xb9
98        {
99            Some(read((addr + 3) as *mut u8))
100        } else if read(addr as *mut u8) == 0x0f && read((addr + 1) as *mut u8) == 0xb9 {
101            Some(read((addr + 2) as *mut u8))
102        } else {
103            None
104        }
105    }
106    if cfg!(target_arch = "aarch64") {
107        val = if read_unaligned(addr as *mut u32) & 0xffff0000 == 0 {
108            Some(read(addr as *mut u8))
109        } else {
110            None
111        }
112    }
113    match val.and_then(|val| {
114        if val & MAGIC == MAGIC {
115            Some(val & 0xf)
116        } else {
117            None
118        }
119    }) {
120        None => None,
121        Some(val) => match val {
122            0 => Some(TrapCode::StackOverflow),
123            1 => Some(TrapCode::HeapAccessOutOfBounds),
124            2 => Some(TrapCode::HeapMisaligned),
125            3 => Some(TrapCode::TableAccessOutOfBounds),
126            4 => Some(TrapCode::IndirectCallToNull),
127            5 => Some(TrapCode::BadSignature),
128            6 => Some(TrapCode::IntegerOverflow),
129            7 => Some(TrapCode::IntegerDivisionByZero),
130            8 => Some(TrapCode::BadConversionToInteger),
131            9 => Some(TrapCode::UnreachableCodeReached),
132            10 => Some(TrapCode::UnalignedAtomic),
133            _ => None,
134        },
135    }
136}
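
// A minimal sketch of how the payload encoding described above round-trips on
// x86_64: hand-assemble the `ud1` byte sequence with a MAGIC-tagged payload and
// check that the decoder recovers the trap code. The byte layout is inferred
// from the decoder above, not from compiler-generated code.
#[cfg(all(test, target_arch = "x86_64"))]
mod illegal_op_payload_tests {
    use super::*;

    #[test]
    fn decodes_trap_code_from_ud1_payload() {
        // 0x0f 0xb9 is `ud1`; the next byte carries MAGIC | trap-code index.
        let bytes = [0x0fu8, 0xb9, MAGIC | 1];
        let decoded = unsafe { process_illegal_op(bytes.as_ptr() as usize) };
        assert_eq!(decoded, Some(TrapCode::HeapAccessOutOfBounds));
    }
}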
137
138cfg_if::cfg_if! {
139    if #[cfg(unix)] {
140        static mut PREV_SIGSEGV: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
141        static mut PREV_SIGBUS: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
142        static mut PREV_SIGILL: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
143        static mut PREV_SIGFPE: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
144
145        unsafe fn platform_init() {
146            let register = |slot: &mut MaybeUninit<libc::sigaction>, signal: i32| {
147                let mut handler: libc::sigaction = mem::zeroed();
148                // The flags here are relatively careful, and they are...
149                //
150                // SA_SIGINFO gives us access to information like the program
151                // counter from where the fault happened.
152                //
153                // SA_ONSTACK allows us to handle signals on an alternate stack,
154                // so that the handler can run in response to running out of
155                // stack space on the main stack. Rust installs an alternate
156                // stack with sigaltstack, so we rely on that.
157                //
158                // SA_NODEFER allows us to reenter the signal handler if we
159                // crash while handling the signal, and fall through to the
160                // Breakpad handler by testing handlingSegFault.
161                handler.sa_flags = libc::SA_SIGINFO | libc::SA_NODEFER | libc::SA_ONSTACK;
162                handler.sa_sigaction = trap_handler as usize;
163                libc::sigemptyset(&mut handler.sa_mask);
164                if libc::sigaction(signal, &handler, slot.as_mut_ptr()) != 0 {
165                    panic!(
166                        "unable to install signal handler: {}",
167                        io::Error::last_os_error(),
168                    );
169                }
170            };
171
172            // Allow handling OOB with signals on all architectures
173            register(&mut PREV_SIGSEGV, libc::SIGSEGV);
174
175            // Handle `unreachable` instructions which execute `ud2` right now
176            register(&mut PREV_SIGILL, libc::SIGILL);
177
178            // x86 uses SIGFPE to report division by zero
179            if cfg!(target_arch = "x86") || cfg!(target_arch = "x86_64") {
180                register(&mut PREV_SIGFPE, libc::SIGFPE);
181            }
182
183            // On ARM, handle Unaligned Accesses.
184            // On Darwin, guard page accesses are raised as SIGBUS.
185            if cfg!(target_arch = "arm") || cfg!(target_vendor = "apple") {
186                register(&mut PREV_SIGBUS, libc::SIGBUS);
187            }
188
189            // This is necessary to support debugging under LLDB on Darwin.
190            // For more details see https://github.com/mono/mono/commit/8e75f5a28e6537e56ad70bf870b86e22539c2fb7
191            #[cfg(target_vendor = "apple")]
192            {
193                use mach2::exception_types::*;
194                use mach2::kern_return::*;
195                use mach2::port::*;
196                use mach2::thread_status::*;
197                use mach2::traps::*;
198                use mach2::mach_types::*;
199
200                extern "C" {
201                    fn task_set_exception_ports(
202                        task: task_t,
203                        exception_mask: exception_mask_t,
204                        new_port: mach_port_t,
205                        behavior: exception_behavior_t,
206                        new_flavor: thread_state_flavor_t,
207                    ) -> kern_return_t;
208                }
209
210                #[allow(non_snake_case)]
211                #[cfg(target_arch = "x86_64")]
212                let MACHINE_THREAD_STATE = x86_THREAD_STATE64;
213                #[allow(non_snake_case)]
214                #[cfg(target_arch = "aarch64")]
215                let MACHINE_THREAD_STATE = 6;
216
217                task_set_exception_ports(
218                    mach_task_self(),
219                    EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC | EXC_MASK_BAD_INSTRUCTION,
220                    MACH_PORT_NULL,
221                    EXCEPTION_STATE_IDENTITY as exception_behavior_t,
222                    MACHINE_THREAD_STATE,
223                );
224            }
225        }
226
227        unsafe extern "C" fn trap_handler(
228            signum: libc::c_int,
229            siginfo: *mut libc::siginfo_t,
230            context: *mut libc::c_void,
231        ) {
232            let previous = match signum {
233                libc::SIGSEGV => &PREV_SIGSEGV,
234                libc::SIGBUS => &PREV_SIGBUS,
235                libc::SIGFPE => &PREV_SIGFPE,
236                libc::SIGILL => &PREV_SIGILL,
237                _ => panic!("unknown signal: {}", signum),
238            };
239            // We try to get the fault address associated with this signal
240            let maybe_fault_address = match signum {
241                libc::SIGSEGV | libc::SIGBUS => {
242                    Some((*siginfo).si_addr() as usize)
243                }
244                _ => None,
245            };
246            let trap_code = match signum {
247                // Check whether it was caused by a UD and whether the trap info is a payload to it
248                libc::SIGILL => {
249                    let addr = (*siginfo).si_addr() as usize;
250                    process_illegal_op(addr)
251                }
252                _ => None,
253            };
254            let ucontext = &mut *(context as *mut ucontext_t);
255            let (pc, sp) = get_pc_sp(ucontext);
256            let handled = TrapHandlerContext::handle_trap(
257                pc,
258                sp,
259                maybe_fault_address,
260                trap_code,
261                |regs| update_context(ucontext, regs),
262                |handler| handler(signum, siginfo, context),
263            );
264
265            if handled {
266                return;
267            }
268
269            // This signal is not for any compiled wasm code we expect, so we
270            // need to forward the signal to the next handler. If there is no
271            // next handler (SIG_IGN or SIG_DFL), then it's time to crash. To do
272            // this, we set the signal back to its original disposition and
273            // return. This will cause the faulting op to be re-executed which
274            // will crash in the normal way. If there is a next handler, call
275            // it. It will either crash synchronously, fix up the instruction
276            // so that execution can continue and return, or trigger a crash by
277            // returning the signal to its original disposition and returning.
278            let previous = &*previous.as_ptr();
279            if previous.sa_flags & libc::SA_SIGINFO != 0 {
280                mem::transmute::<
281                    usize,
282                    extern "C" fn(libc::c_int, *mut libc::siginfo_t, *mut libc::c_void),
283                >(previous.sa_sigaction)(signum, siginfo, context)
284            } else if previous.sa_sigaction == libc::SIG_DFL {
286                libc::sigaction(signum, previous, ptr::null_mut());
287            } else if previous.sa_sigaction != libc::SIG_IGN {
288                mem::transmute::<usize, extern "C" fn(libc::c_int)>(
289                    previous.sa_sigaction
290                )(signum)
291            }
292        }
293
294        unsafe fn get_pc_sp(context: &ucontext_t) -> (usize, usize) {
295            let (pc, sp);
296            cfg_if::cfg_if! {
297                if #[cfg(all(
298                    any(target_os = "linux", target_os = "android"),
299                    target_arch = "x86_64",
300                ))] {
301                    pc = context.uc_mcontext.gregs[libc::REG_RIP as usize] as usize;
302                    sp = context.uc_mcontext.gregs[libc::REG_RSP as usize] as usize;
303                } else if #[cfg(all(
304                    any(target_os = "linux", target_os = "android"),
305                    target_arch = "x86",
306                ))] {
307                    pc = context.uc_mcontext.gregs[libc::REG_EIP as usize] as usize;
308                    sp = context.uc_mcontext.gregs[libc::REG_ESP as usize] as usize;
309                } else if #[cfg(all(target_os = "freebsd", target_arch = "x86"))] {
310                    pc = context.uc_mcontext.mc_eip as usize;
311                    sp = context.uc_mcontext.mc_esp as usize;
312                } else if #[cfg(all(target_os = "freebsd", target_arch = "x86_64"))] {
313                    pc = context.uc_mcontext.mc_rip as usize;
314                    sp = context.uc_mcontext.mc_rsp as usize;
315                } else if #[cfg(all(target_vendor = "apple", target_arch = "x86_64"))] {
316                    pc = (*context.uc_mcontext).__ss.__rip as usize;
317                    sp = (*context.uc_mcontext).__ss.__rsp as usize;
318                } else if #[cfg(all(
319                        any(target_os = "linux", target_os = "android"),
320                        target_arch = "aarch64",
321                    ))] {
322                    pc = context.uc_mcontext.pc as usize;
323                    sp = context.uc_mcontext.sp as usize;
324                } else if #[cfg(all(
325                    any(target_os = "linux", target_os = "android"),
326                    target_arch = "arm",
327                ))] {
328                    pc = context.uc_mcontext.arm_pc as usize;
329                    sp = context.uc_mcontext.arm_sp as usize;
330                } else if #[cfg(all(
331                    any(target_os = "linux", target_os = "android"),
332                    any(target_arch = "riscv64", target_arch = "riscv32"),
333                ))] {
334                    pc = context.uc_mcontext.__gregs[libc::REG_PC] as usize;
335                    sp = context.uc_mcontext.__gregs[libc::REG_SP] as usize;
336                } else if #[cfg(all(target_vendor = "apple", target_arch = "aarch64"))] {
337                    pc = (*context.uc_mcontext).__ss.__pc as usize;
338                    sp = (*context.uc_mcontext).__ss.__sp as usize;
339                } else if #[cfg(all(target_os = "freebsd", target_arch = "aarch64"))] {
340                    pc = context.uc_mcontext.mc_gpregs.gp_elr as usize;
341                    sp = context.uc_mcontext.mc_gpregs.gp_sp as usize;
342                } else {
343                    compile_error!("Unsupported platform");
344                }
345            };
346            (pc, sp)
347        }
348
349        unsafe fn update_context(context: &mut ucontext_t, regs: TrapHandlerRegs) {
350            cfg_if::cfg_if! {
351                if #[cfg(all(
352                        any(target_os = "linux", target_os = "android"),
353                        target_arch = "x86_64",
354                    ))] {
355                    let TrapHandlerRegs { rip, rsp, rbp, rdi, rsi } = regs;
356                    context.uc_mcontext.gregs[libc::REG_RIP as usize] = rip as i64;
357                    context.uc_mcontext.gregs[libc::REG_RSP as usize] = rsp as i64;
358                    context.uc_mcontext.gregs[libc::REG_RBP as usize] = rbp as i64;
359                    context.uc_mcontext.gregs[libc::REG_RDI as usize] = rdi as i64;
360                    context.uc_mcontext.gregs[libc::REG_RSI as usize] = rsi as i64;
361                } else if #[cfg(all(
362                    any(target_os = "linux", target_os = "android"),
363                    target_arch = "x86",
364                ))] {
365                    let TrapHandlerRegs { eip, esp, ebp, ecx, edx } = regs;
366                    context.uc_mcontext.gregs[libc::REG_EIP as usize] = eip as i32;
367                    context.uc_mcontext.gregs[libc::REG_ESP as usize] = esp as i32;
368                    context.uc_mcontext.gregs[libc::REG_EBP as usize] = ebp as i32;
369                    context.uc_mcontext.gregs[libc::REG_ECX as usize] = ecx as i32;
370                    context.uc_mcontext.gregs[libc::REG_EDX as usize] = edx as i32;
371                } else if #[cfg(all(target_vendor = "apple", target_arch = "x86_64"))] {
372                    let TrapHandlerRegs { rip, rsp, rbp, rdi, rsi } = regs;
373                    (*context.uc_mcontext).__ss.__rip = rip;
374                    (*context.uc_mcontext).__ss.__rsp = rsp;
375                    (*context.uc_mcontext).__ss.__rbp = rbp;
376                    (*context.uc_mcontext).__ss.__rdi = rdi;
377                    (*context.uc_mcontext).__ss.__rsi = rsi;
378                } else if #[cfg(all(target_os = "freebsd", target_arch = "x86"))] {
379                    let TrapHandlerRegs { eip, esp, ebp, ecx, edx } = regs;
380                    context.uc_mcontext.mc_eip = eip as libc::register_t;
381                    context.uc_mcontext.mc_esp = esp as libc::register_t;
382                    context.uc_mcontext.mc_ebp = ebp as libc::register_t;
383                    context.uc_mcontext.mc_ecx = ecx as libc::register_t;
384                    context.uc_mcontext.mc_edx = edx as libc::register_t;
385                } else if #[cfg(all(target_os = "freebsd", target_arch = "x86_64"))] {
386                    let TrapHandlerRegs { rip, rsp, rbp, rdi, rsi } = regs;
387                    context.uc_mcontext.mc_rip = rip as libc::register_t;
388                    context.uc_mcontext.mc_rsp = rsp as libc::register_t;
389                    context.uc_mcontext.mc_rbp = rbp as libc::register_t;
390                    context.uc_mcontext.mc_rdi = rdi as libc::register_t;
391                    context.uc_mcontext.mc_rsi = rsi as libc::register_t;
392                } else if #[cfg(all(
393                        any(target_os = "linux", target_os = "android"),
394                        target_arch = "aarch64",
395                    ))] {
396                    let TrapHandlerRegs { pc, sp, x0, x1, x29, lr } = regs;
397                    context.uc_mcontext.pc = pc;
398                    context.uc_mcontext.sp = sp;
399                    context.uc_mcontext.regs[0] = x0;
400                    context.uc_mcontext.regs[1] = x1;
401                    context.uc_mcontext.regs[29] = x29;
402                    context.uc_mcontext.regs[30] = lr;
403                } else if #[cfg(all(
404                        any(target_os = "linux", target_os = "android"),
405                        target_arch = "arm",
406                    ))] {
407                    let TrapHandlerRegs {
408                        pc,
409                        r0,
410                        r1,
411                        r7,
412                        r11,
413                        r13,
414                        r14,
415                        cpsr_thumb,
416                        cpsr_endian,
417                    } = regs;
418                    context.uc_mcontext.arm_pc = pc;
419                    context.uc_mcontext.arm_r0 = r0;
420                    context.uc_mcontext.arm_r1 = r1;
421                    context.uc_mcontext.arm_r7 = r7;
422                    context.uc_mcontext.arm_fp = r11;
423                    context.uc_mcontext.arm_sp = r13;
424                    context.uc_mcontext.arm_lr = r14;
425                    if cpsr_thumb {
426                        context.uc_mcontext.arm_cpsr |= 0x20;
427                    } else {
428                        context.uc_mcontext.arm_cpsr &= !0x20;
429                    }
430                    if cpsr_endian {
431                        context.uc_mcontext.arm_cpsr |= 0x200;
432                    } else {
433                        context.uc_mcontext.arm_cpsr &= !0x200;
434                    }
435                } else if #[cfg(all(
436                    any(target_os = "linux", target_os = "android"),
437                    any(target_arch = "riscv64", target_arch = "riscv32"),
438                ))] {
439                    let TrapHandlerRegs { pc, ra, sp, a0, a1, s0 } = regs;
440                    context.uc_mcontext.__gregs[libc::REG_PC] = pc as libc::c_ulong;
441                    context.uc_mcontext.__gregs[libc::REG_RA] = ra as libc::c_ulong;
442                    context.uc_mcontext.__gregs[libc::REG_SP] = sp as libc::c_ulong;
443                    context.uc_mcontext.__gregs[libc::REG_A0] = a0 as libc::c_ulong;
444                    context.uc_mcontext.__gregs[libc::REG_A0 + 1] = a1 as libc::c_ulong;
445                    context.uc_mcontext.__gregs[libc::REG_S0] = s0 as libc::c_ulong;
446                } else if #[cfg(all(target_vendor = "apple", target_arch = "aarch64"))] {
447                    let TrapHandlerRegs { pc, sp, x0, x1, x29, lr } = regs;
448                    (*context.uc_mcontext).__ss.__pc = pc;
449                    (*context.uc_mcontext).__ss.__sp = sp;
450                    (*context.uc_mcontext).__ss.__x[0] = x0;
451                    (*context.uc_mcontext).__ss.__x[1] = x1;
452                    (*context.uc_mcontext).__ss.__fp = x29;
453                    (*context.uc_mcontext).__ss.__lr = lr;
454                } else if #[cfg(all(target_os = "freebsd", target_arch = "aarch64"))] {
455                    let TrapHandlerRegs { pc, sp, x0, x1, x29, lr } = regs;
456                    context.uc_mcontext.mc_gpregs.gp_elr = pc as libc::register_t;
457                    context.uc_mcontext.mc_gpregs.gp_sp = sp as libc::register_t;
458                    context.uc_mcontext.mc_gpregs.gp_x[0] = x0 as libc::register_t;
459                    context.uc_mcontext.mc_gpregs.gp_x[1] = x1 as libc::register_t;
460                    context.uc_mcontext.mc_gpregs.gp_x[29] = x29 as libc::register_t;
461                    context.uc_mcontext.mc_gpregs.gp_x[30] = lr as libc::register_t;
462                } else {
463                    compile_error!("Unsupported platform");
464                }
465            };
466        }
467    } else if #[cfg(target_os = "windows")] {
468        use windows_sys::Win32::System::Diagnostics::Debug::{
469            AddVectoredExceptionHandler,
470            CONTEXT,
471            EXCEPTION_CONTINUE_EXECUTION,
472            EXCEPTION_CONTINUE_SEARCH,
473            EXCEPTION_POINTERS,
474        };
475        use windows_sys::Win32::Foundation::{
476            EXCEPTION_ACCESS_VIOLATION,
477            EXCEPTION_ILLEGAL_INSTRUCTION,
478            EXCEPTION_INT_DIVIDE_BY_ZERO,
479            EXCEPTION_INT_OVERFLOW,
480            EXCEPTION_STACK_OVERFLOW,
481        };
482
483        unsafe fn platform_init() {
484            // our trap handler needs to go first, so that we can recover from
485            // wasm faults and continue execution, so pass `1` as a true value
486            // here.
487            if AddVectoredExceptionHandler(1, Some(exception_handler)).is_null() {
488                panic!("failed to add exception handler: {}", io::Error::last_os_error());
489            }
490        }
491
492        unsafe extern "system" fn exception_handler(
493            exception_info: *mut EXCEPTION_POINTERS
494        ) -> i32 {
495            // Check the kind of exception, since we only handle a subset within
496            // wasm code. If anything else happens we want to defer to whatever
497            // the rest of the system wants to do for this exception.
498            let record = &*(*exception_info).ExceptionRecord;
499            if record.ExceptionCode != EXCEPTION_ACCESS_VIOLATION &&
500                record.ExceptionCode != EXCEPTION_ILLEGAL_INSTRUCTION &&
501                record.ExceptionCode != EXCEPTION_STACK_OVERFLOW &&
502                record.ExceptionCode != EXCEPTION_INT_DIVIDE_BY_ZERO &&
503                record.ExceptionCode != EXCEPTION_INT_OVERFLOW
504            {
505                return EXCEPTION_CONTINUE_SEARCH;
506            }
507
508            // FIXME: this is what the previous C++ did to make sure that TLS
509            // works by the time we execute this trap handling code. This isn't
510            // exactly super easy to call from Rust though and it's not clear we
511            // necessarily need to do so. Leaving this here in case we need this
512            // in the future, but for now we can probably wait until we see a
513            // strange fault before figuring out how to reimplement this in
514            // Rust.
515            //
516            // if (!NtCurrentTeb()->Reserved1[sThreadLocalArrayPointerIndex]) {
517            //     return EXCEPTION_CONTINUE_SEARCH;
518            // }
519
520            let context = &mut *(*exception_info).ContextRecord;
521            let (pc, sp) = get_pc_sp(context);
522
523            // We try to get the fault address associated with this exception.
524            let maybe_fault_address = match record.ExceptionCode {
525                EXCEPTION_ACCESS_VIOLATION => Some(record.ExceptionInformation[1]),
526                EXCEPTION_STACK_OVERFLOW => Some(sp),
527                _ => None,
528            };
529            let trap_code = match record.ExceptionCode {
530                // Check whether it was caused by a UD and whether the trap info is a payload to it
531                EXCEPTION_ILLEGAL_INSTRUCTION => {
532                    process_illegal_op(pc)
533                }
534                _ => None,
535            };
536            // This is basically the same as the unix version above, only with a
537            // few parameters tweaked here and there.
538            let handled = TrapHandlerContext::handle_trap(
539                pc,
540                sp,
541                maybe_fault_address,
542                trap_code,
543                |regs| update_context(context, regs),
544                |handler| handler(exception_info),
545            );
546
547            if handled {
548                EXCEPTION_CONTINUE_EXECUTION
549            } else {
550                EXCEPTION_CONTINUE_SEARCH
551            }
552        }
553
554        unsafe fn get_pc_sp(context: &CONTEXT) -> (usize, usize) {
555            let (pc, sp);
556            cfg_if::cfg_if! {
557                if #[cfg(target_arch = "x86_64")] {
558                    pc = context.Rip as usize;
559                    sp = context.Rsp as usize;
560                } else if #[cfg(target_arch = "x86")] {
561                    pc = context.Eip as usize;
562                    sp = context.Esp as usize;
563                } else {
564                    compile_error!("Unsupported platform");
565                }
566            };
567            (pc, sp)
568        }
569
570        unsafe fn update_context(context: &mut CONTEXT, regs: TrapHandlerRegs) {
571            cfg_if::cfg_if! {
572                if #[cfg(target_arch = "x86_64")] {
573                    let TrapHandlerRegs { rip, rsp, rbp, rdi, rsi } = regs;
574                    context.Rip = rip;
575                    context.Rsp = rsp;
576                    context.Rbp = rbp;
577                    context.Rdi = rdi;
578                    context.Rsi = rsi;
579                } else if #[cfg(target_arch = "x86")] {
580                    let TrapHandlerRegs { eip, esp, ebp, ecx, edx } = regs;
581                    context.Eip = eip;
582                    context.Esp = esp;
583                    context.Ebp = ebp;
584                    context.Ecx = ecx;
585                    context.Edx = edx;
586                } else {
587                    compile_error!("Unsupported platform");
588                }
589            };
590        }
591    }
592}
593
594/// This function is required to be called before any WebAssembly is entered.
595/// This will configure global state such as signal handlers to prepare the
596/// process to receive wasm traps.
597///
598/// This function must not only be called globally once before entering
599/// WebAssembly but it must also be called once-per-thread that enters
600/// WebAssembly. Currently in wasmer's integration this function is called on
601/// creation of a `Store`.
602pub fn init_traps() {
603    static INIT: Once = Once::new();
604    INIT.call_once(|| unsafe {
605        platform_init();
606    });
607}
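
// Assumed embedder flow (illustrative, not an API guarantee): install the
// signal/exception handlers once, then enter Wasm through `catch_traps` so any
// trap surfaces as an `Err(Trap)` instead of crashing the process.
//
//     init_traps();
//     let config = VMConfig { wasm_stack_size: None }; // use the global default
//     let result = unsafe { catch_traps(None, &config, || run_wasm_entry_point()) };
//
// `run_wasm_entry_point` is a placeholder for the closure that actually calls
// into compiled Wasm (for example via `wasmer_call_trampoline`).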
608
609/// Raises a user-defined trap immediately.
610///
611/// This function performs as-if a wasm trap was just executed, only the trap
612/// has a dynamic payload associated with it which is user-provided. This trap
613/// payload is then returned from `catch_traps` below.
614///
615/// # Safety
616///
617/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
618/// have been previously called and not yet returned.
619/// Additionally no Rust destructors may be on the stack.
620/// They will be skipped and not executed.
621pub unsafe fn raise_user_trap(data: Box<dyn Error + Send + Sync>) -> ! {
622    unwind_with(UnwindReason::UserTrap(data))
623}
624
625/// Raises a trap from inside library code immediately.
626///
627/// This function performs as-if a wasm trap was just executed. This trap
628/// payload is then returned from `catch_traps` below.
629///
630/// # Safety
631///
632/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
633/// have been previously called and not yet returned.
634/// Additionally no Rust destructors may be on the stack.
635/// They will be skipped and not executed.
636pub unsafe fn raise_lib_trap(trap: Trap) -> ! {
637    unwind_with(UnwindReason::LibTrap(trap))
638}
639
640/// Carries a Rust panic across wasm code and resumes the panic on the other
641/// side.
642///
643/// # Safety
644///
645/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
646/// have been previously called and not returned. Additionally no Rust destructors may be on the
647/// stack. They will be skipped and not executed.
648pub unsafe fn resume_panic(payload: Box<dyn Any + Send>) -> ! {
649    unwind_with(UnwindReason::Panic(payload))
650}
651
652/// Call the wasm function pointed to by `callee`.
653///
654/// * `vmctx` - the callee vmctx argument
655/// * `trampoline` - the jit-generated trampoline whose ABI takes three values:
656///   the callee vmctx, the `callee` argument below, and then the `values_vec`
657///   argument.
658/// * `callee` - the second argument to the `trampoline` function
659/// * `values_vec` - points to a buffer which holds the incoming arguments, and to
660///   which the outgoing return values will be written.
662///
663/// # Safety
664///
665/// Wildly unsafe because it calls raw function pointers and reads/writes raw
666/// function pointers.
667pub unsafe fn wasmer_call_trampoline(
668    trap_handler: Option<*const TrapHandlerFn<'static>>,
669    config: &VMConfig,
670    vmctx: VMFunctionContext,
671    trampoline: VMTrampoline,
672    callee: *const VMFunctionBody,
673    values_vec: *mut u8,
674) -> Result<(), Trap> {
675    catch_traps(trap_handler, config, || {
676        mem::transmute::<_, extern "C" fn(VMFunctionContext, *const VMFunctionBody, *mut u8)>(
677            trampoline,
678        )(vmctx, callee, values_vec);
679    })
680}
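
// For reference, the transmute above treats `trampoline` as a function with the
// following shape; a jit-generated trampoline is expected to unpack the call
// arguments from `values_vec`, invoke `callee`, and write the results back into
// the same buffer. This is a restatement of the cast above, not a separate API.
//
//     extern "C" fn(
//         vmctx: VMFunctionContext,
//         callee: *const VMFunctionBody,
//         values_vec: *mut u8,
//     )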
681
682/// Catches any wasm traps that happen within the execution of `closure`,
683/// returning them as a `Result`.
684///
685/// # Safety
686///
687/// Highly unsafe since `closure` won't have any dtors run.
688pub unsafe fn catch_traps<F, R>(
689    trap_handler: Option<*const TrapHandlerFn<'static>>,
690    config: &VMConfig,
691    closure: F,
692) -> Result<R, Trap>
693where
694    F: FnOnce() -> R,
695{
696    // Ensure that per-thread initialization is done.
697    lazy_per_thread_init()?;
698    let stack_size = config
699        .wasm_stack_size
700        .unwrap_or_else(|| DEFAULT_STACK_SIZE.load(Ordering::Relaxed));
701    on_wasm_stack(stack_size, trap_handler, closure).map_err(UnwindReason::into_trap)
702}
703
704// We need two separate thread-local variables here:
705// - YIELDER is set within the new stack and is used to unwind back to the root
706//   of the stack from inside it.
707// - TRAP_HANDLER is set from outside the new stack and is solely used from
708//   signal handlers. It must be atomic since it is used by signal handlers.
709//
710// We also do per-thread signal stack initialization on the first time
711// TRAP_HANDLER is accessed.
712thread_local! {
713    static YIELDER: Cell<Option<NonNull<Yielder<(), UnwindReason>>>> = Cell::new(None);
714    static TRAP_HANDLER: AtomicPtr<TrapHandlerContext> = AtomicPtr::new(ptr::null_mut());
715}
716
717/// Read-only information that is used by signal handlers to handle and recover
718/// from traps.
719#[allow(clippy::type_complexity)]
720struct TrapHandlerContext {
721    inner: *const u8,
722    handle_trap: fn(
723        *const u8,
724        usize,
725        usize,
726        Option<usize>,
727        Option<TrapCode>,
728        &mut dyn FnMut(TrapHandlerRegs),
729    ) -> bool,
730    custom_trap: Option<*const TrapHandlerFn<'static>>,
731}
732struct TrapHandlerContextInner<T> {
733    /// Information about the currently running coroutine. This is used to
734    /// reset execution to the root of the coroutine when a trap is handled.
735    coro_trap_handler: CoroutineTrapHandler<Result<T, UnwindReason>>,
736}
737
738impl TrapHandlerContext {
739    /// Runs the given function with a trap handler context. The previous
740    /// trap handler context is preserved and restored afterwards.
741    fn install<T, R>(
742        custom_trap: Option<*const TrapHandlerFn<'static>>,
743        coro_trap_handler: CoroutineTrapHandler<Result<T, UnwindReason>>,
744        f: impl FnOnce() -> R,
745    ) -> R {
746        // Type-erase the trap handler function so that it can be placed in TLS.
747        fn func<T>(
748            ptr: *const u8,
749            pc: usize,
750            sp: usize,
751            maybe_fault_address: Option<usize>,
752            trap_code: Option<TrapCode>,
753            update_regs: &mut dyn FnMut(TrapHandlerRegs),
754        ) -> bool {
755            unsafe {
756                (*(ptr as *const TrapHandlerContextInner<T>)).handle_trap(
757                    pc,
758                    sp,
759                    maybe_fault_address,
760                    trap_code,
761                    update_regs,
762                )
763            }
764        }
765        let inner = TrapHandlerContextInner { coro_trap_handler };
766        let ctx = Self {
767            inner: &inner as *const _ as *const u8,
768            handle_trap: func::<T>,
769            custom_trap,
770        };
771
772        compiler_fence(Ordering::Release);
773        let prev = TRAP_HANDLER.with(|ptr| {
774            let prev = ptr.load(Ordering::Relaxed);
775            ptr.store(&ctx as *const Self as *mut Self, Ordering::Relaxed);
776            prev
777        });
778
779        defer! {
780            TRAP_HANDLER.with(|ptr| ptr.store(prev, Ordering::Relaxed));
781            compiler_fence(Ordering::Acquire);
782        }
783
784        f()
785    }
786
787    /// Attempts to handle the trap if it's a wasm trap.
788    unsafe fn handle_trap(
789        pc: usize,
790        sp: usize,
791        maybe_fault_address: Option<usize>,
792        trap_code: Option<TrapCode>,
793        mut update_regs: impl FnMut(TrapHandlerRegs),
794        call_handler: impl Fn(&TrapHandlerFn<'static>) -> bool,
795    ) -> bool {
796        let ptr = TRAP_HANDLER.with(|ptr| ptr.load(Ordering::Relaxed));
797        if ptr.is_null() {
798            return false;
799        }
800
801        let ctx = &*ptr;
802
803        // Check if this trap is handled by a custom trap handler.
804        if let Some(trap_handler) = ctx.custom_trap {
805            if call_handler(&*trap_handler) {
806                return true;
807            }
808        }
809
810        (ctx.handle_trap)(
811            ctx.inner,
812            pc,
813            sp,
814            maybe_fault_address,
815            trap_code,
816            &mut update_regs,
817        )
818    }
819}
820
821impl<T> TrapHandlerContextInner<T> {
822    unsafe fn handle_trap(
823        &self,
824        pc: usize,
825        sp: usize,
826        maybe_fault_address: Option<usize>,
827        trap_code: Option<TrapCode>,
828        update_regs: &mut dyn FnMut(TrapHandlerRegs),
829    ) -> bool {
830        // Check if this trap occurred while executing on the Wasm stack. We can
831        // only recover from traps if that is the case.
832        if !self.coro_trap_handler.stack_ptr_in_bounds(sp) {
833            return false;
834        }
835
836        let signal_trap = trap_code.or_else(|| {
837            maybe_fault_address.map(|addr| {
838                if self.coro_trap_handler.stack_ptr_in_bounds(addr) {
839                    TrapCode::StackOverflow
840                } else {
841                    TrapCode::HeapAccessOutOfBounds
842                }
843            })
844        });
845
846        // Don't try to generate a backtrace for stack overflows: unwinding
847        // information is often not precise enough to properly describe what is
848        // happening during a function prologue, which can lead the unwinder to
849        // read invalid memory addresses.
850        //
851        // See: https://github.com/rust-lang/backtrace-rs/pull/357
852        let backtrace = if signal_trap == Some(TrapCode::StackOverflow) {
853            Backtrace::from(vec![])
854        } else {
855            Backtrace::new_unresolved()
856        };
857
858        // Set up the register state for exception return to force the
859        // coroutine to return to its caller with UnwindReason::WasmTrap.
860        let unwind = UnwindReason::WasmTrap {
861            backtrace,
862            signal_trap,
863            pc,
864        };
865        let regs = self
866            .coro_trap_handler
867            .setup_trap_handler(move || Err(unwind));
868        update_regs(regs);
869        true
870    }
871}
872
873enum UnwindReason {
874    /// A panic caused by the host
875    Panic(Box<dyn Any + Send>),
876    /// A custom error triggered by the user
877    UserTrap(Box<dyn Error + Send + Sync>),
878    /// A Trap triggered by a wasm libcall
879    LibTrap(Trap),
880    /// A trap caused by the Wasm generated code
881    WasmTrap {
882        backtrace: Backtrace,
883        pc: usize,
884        signal_trap: Option<TrapCode>,
885    },
886}
887
888impl UnwindReason {
889    fn into_trap(self) -> Trap {
890        match self {
891            Self::UserTrap(data) => Trap::User(data),
892            Self::LibTrap(trap) => trap,
893            Self::WasmTrap {
894                backtrace,
895                pc,
896                signal_trap,
897            } => Trap::wasm(pc, backtrace, signal_trap),
898            Self::Panic(panic) => std::panic::resume_unwind(panic),
899        }
900    }
901}
902
903unsafe fn unwind_with(reason: UnwindReason) -> ! {
904    let yielder = YIELDER
905        .with(|cell| cell.replace(None))
906        .expect("not running on Wasm stack");
907
908    yielder.as_ref().suspend(reason);
909
910    // on_wasm_stack will forcibly reset the coroutine stack after yielding.
911    unreachable!();
912}
913
914/// Runs the given function on a separate stack so that its stack usage can be
915/// bounded. Stack overflows and other traps can be caught and execution
916/// returned to the root of the stack.
917fn on_wasm_stack<F: FnOnce() -> T, T>(
918    stack_size: usize,
919    trap_handler: Option<*const TrapHandlerFn<'static>>,
920    f: F,
921) -> Result<T, UnwindReason> {
922    // Allocating a new stack is pretty expensive since it involves several
923    // system calls. We therefore keep a cache of pre-allocated stacks which
924    // allows them to be reused multiple times.
925    // FIXME(Amanieu): We should refactor this to avoid the lock.
926    lazy_static::lazy_static! {
927        static ref STACK_POOL: crossbeam_queue::SegQueue<DefaultStack> = crossbeam_queue::SegQueue::new();
928    }
929    let stack = STACK_POOL
930        .pop()
931        .unwrap_or_else(|| DefaultStack::new(stack_size).unwrap());
932    let mut stack = scopeguard::guard(stack, |stack| STACK_POOL.push(stack));
933
934    // Create a coroutine with a new stack to run the function on.
935    let mut coro = ScopedCoroutine::with_stack(&mut *stack, move |yielder, ()| {
936        // Save the yielder to TLS so that it can be used later.
937        YIELDER.with(|cell| cell.set(Some(yielder.into())));
938
939        Ok(f())
940    });
941
942    // Ensure that YIELDER is reset on exit even if the coroutine panics.
943    defer! {
944        YIELDER.with(|cell| cell.set(None));
945    }
946
947    // Set up metadata for the trap handler for the duration of the coroutine
948    // execution. This is restored to its previous value afterwards.
949    TrapHandlerContext::install(trap_handler, coro.trap_handler(), || {
950        match coro.resume(()) {
951            CoroutineResult::Yield(trap) => {
952                // This came from unwind_with which requires that there be only
953                // Wasm code on the stack.
954                unsafe {
955                    coro.force_reset();
956                }
957                Err(trap)
958            }
959            CoroutineResult::Return(result) => result,
960        }
961    })
962}
963
964/// When executing on the Wasm stack, temporarily switch back to the host stack
965/// to perform an operation that should not be constrained by the Wasm stack
966/// limits.
967///
968/// This is particularly important since the usage of the Wasm stack is under
969/// the control of untrusted code. Malicious code could artificially induce a
970/// stack overflow in the middle of a sensitive host operation (e.g. growing
971/// a memory) which would be hard to recover from.
972pub fn on_host_stack<F: FnOnce() -> T, T>(f: F) -> T {
973    // Reset YIELDER to None for the duration of this call to indicate that we
974    // are no longer on the Wasm stack.
975    let yielder_ptr = YIELDER.with(|cell| cell.replace(None));
976
977    // If we are already on the host stack, execute the function directly. This
978    // happens if a host function is called directly from the API.
979    let yielder = match yielder_ptr {
980        Some(ptr) => unsafe { ptr.as_ref() },
981        None => return f(),
982    };
983
984    // Restore YIELDER upon exiting normally or unwinding.
985    defer! {
986        YIELDER.with(|cell| cell.set(yielder_ptr));
987    }
988
989    // on_parent_stack requires the closure to be Send so that the Yielder
990    // cannot be called from the parent stack. This is not a problem for us
991    // since we don't expose the Yielder.
992    struct SendWrapper<T>(T);
993    unsafe impl<T> Send for SendWrapper<T> {}
994    let wrapped = SendWrapper(f);
995    yielder.on_parent_stack(move || {
996        let wrapped = wrapped;
997        (wrapped.0)()
998    })
999}
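
// Sketch of the intended pattern (hypothetical libcall, illustrative only):
// anything that can allocate or otherwise needs more than the bounded Wasm
// stack should be wrapped in `on_host_stack`.
//
//     unsafe fn example_memory_grow_libcall(delta: u32) -> u32 {
//         on_host_stack(|| {
//             // Grow the memory here, free of the Wasm stack limit.
//             grow_memory(delta)
//         })
//     }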
1000
1001#[cfg(windows)]
1002pub fn lazy_per_thread_init() -> Result<(), Trap> {
1003    // We need additional space on the stack to handle stack overflow
1004    // exceptions. Rust's initialization code sets this to 0x5000 but this
1005    // seems to be insufficient in practice.
1006    use windows_sys::Win32::System::Threading::SetThreadStackGuarantee;
1007    if unsafe { SetThreadStackGuarantee(&mut 0x10000) } == 0 {
1008        panic!("failed to set thread stack guarantee");
1009    }
1010
1011    Ok(())
1012}
1013
1014/// A module for registering a custom alternate signal stack (sigaltstack).
1015///
1016/// Rust's libstd installs an alternate stack with size `SIGSTKSZ`, which is not
1017/// always large enough for our signal handling code. Override it by creating
1018/// and registering our own alternate stack that is large enough and has a guard
1019/// page.
1020#[cfg(unix)]
1021pub fn lazy_per_thread_init() -> Result<(), Trap> {
1022    use std::ptr::null_mut;
1023
1024    thread_local! {
1025        /// Thread-local state is lazy-initialized on the first time it's used,
1026        /// and dropped when the thread exits.
1027        static TLS: Tls = unsafe { init_sigstack() };
1028    }
1029
1030    /// The size of the sigaltstack (not including the guard, which will be
1031    /// added). Make this large enough to run our signal handlers.
1032    const MIN_STACK_SIZE: usize = 16 * 4096;
1033
1034    enum Tls {
1035        OutOfMemory,
1036        Allocated {
1037            mmap_ptr: *mut libc::c_void,
1038            mmap_size: usize,
1039        },
1040        BigEnough,
1041    }
1042
1043    unsafe fn init_sigstack() -> Tls {
1044        // Check to see if the existing sigaltstack, if it exists, is big
1045        // enough. If so we don't need to allocate our own.
1046        let mut old_stack = mem::zeroed();
1047        let r = libc::sigaltstack(ptr::null(), &mut old_stack);
1048        assert_eq!(r, 0, "learning about sigaltstack failed");
1049        if old_stack.ss_flags & libc::SS_DISABLE == 0 && old_stack.ss_size >= MIN_STACK_SIZE {
1050            return Tls::BigEnough;
1051        }
1052
1053        // ... but failing that we need to allocate our own, so do all that
1054        // here.
1055        let page_size: usize = region::page::size();
1056        let guard_size = page_size;
1057        let alloc_size = guard_size + MIN_STACK_SIZE;
1058
1059        let ptr = libc::mmap(
1060            null_mut(),
1061            alloc_size,
1062            libc::PROT_NONE,
1063            libc::MAP_PRIVATE | libc::MAP_ANON,
1064            -1,
1065            0,
1066        );
1067        if ptr == libc::MAP_FAILED {
1068            return Tls::OutOfMemory;
1069        }
1070
1071        // Prepare the stack with readable/writable memory and then register it
1072        // with `sigaltstack`.
1073        let stack_ptr = (ptr as usize + guard_size) as *mut libc::c_void;
1074        let r = libc::mprotect(
1075            stack_ptr,
1076            MIN_STACK_SIZE,
1077            libc::PROT_READ | libc::PROT_WRITE,
1078        );
1079        assert_eq!(r, 0, "mprotect to configure memory for sigaltstack failed");
1080        let new_stack = libc::stack_t {
1081            ss_sp: stack_ptr,
1082            ss_flags: 0,
1083            ss_size: MIN_STACK_SIZE,
1084        };
1085        let r = libc::sigaltstack(&new_stack, ptr::null_mut());
1086        assert_eq!(r, 0, "registering new sigaltstack failed");
1087
1088        Tls::Allocated {
1089            mmap_ptr: ptr,
1090            mmap_size: alloc_size,
1091        }
1092    }
1093
1094    // Ensure TLS runs its initializer and return an error if it failed to
1095    // set up a separate stack for signal handlers.
1096    return TLS.with(|tls| {
1097        if let Tls::OutOfMemory = tls {
1098            Err(Trap::oom())
1099        } else {
1100            Ok(())
1101        }
1102    });
1103
1104    impl Drop for Tls {
1105        fn drop(&mut self) {
1106            let (ptr, size) = match self {
1107                Self::Allocated {
1108                    mmap_ptr,
1109                    mmap_size,
1110                } => (*mmap_ptr, *mmap_size),
1111                _ => return,
1112            };
1113            unsafe {
1114                // Deallocate the stack memory.
1115                let r = libc::munmap(ptr, size);
1116                debug_assert_eq!(r, 0, "munmap failed during thread shutdown");
1117            }
1118        }
1119    }
1120}