1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
use crate::ffi::CStr;
use crate::io;
use crate::sys::{unsupported, Void};
use crate::time::Duration;

/// Platform thread handle. `Void` is uninhabited, so a `Thread` value can
/// never actually exist — spawning always fails on this target (see
/// `Thread::new` below returning `unsupported()`).
pub struct Thread(Void);

/// Default minimum stack size handed to the thread builder. Effectively
/// unused here since threads cannot be spawned on this target.
pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;

impl Thread {
    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
    pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
        // Spawning is not available on this target; report failure.
        unsupported()
    }

    /// No scheduler to yield to, so this is a no-op.
    pub fn yield_now() {}

    /// Thread names are not supported; the request is silently dropped.
    pub fn set_name(_name: &CStr) {}

    #[cfg(not(target_feature = "atomics"))]
    pub fn sleep(_dur: Duration) {
        panic!("can't sleep");
    }

    #[cfg(target_feature = "atomics")]
    pub fn sleep(dur: Duration) {
        use crate::arch::wasm32;
        use crate::cmp;

        // Block this thread with `i32.atomic.wait` on a stack local that no
        // other thread can see. Nobody can ever notify us (return value 0)
        // and the comparison against 0 cannot fail (return value 1), so the
        // wait must end via the timeout (return value 2). The timeout
        // argument is an `i64` of nanoseconds, so long sleeps are performed
        // in `i64::max_value()`-sized chunks until the duration is used up.
        let mut remaining = dur.as_nanos();
        while remaining > 0 {
            let chunk = cmp::min(remaining, i64::max_value() as u128);
            let mut unused = 0;
            let rc = unsafe { wasm32::i32_atomic_wait(&mut unused, 0, chunk as i64) };
            debug_assert_eq!(rc, 2);
            remaining -= chunk;
        }
    }

    pub fn join(self) {
        // `self.0` is the uninhabited `Void`, so this match is statically
        // unreachable — there is never a spawned thread to join.
        match self.0 {}
    }
}

// Stack-guard support for this platform. There is no guard region to
// report, so `Guard` is the never type and both entry points yield `None`.
pub mod guard {
    pub type Guard = !;
    // Returns the current thread's guard range; always `None` here.
    pub unsafe fn current() -> Option<Guard> {
        None
    }
    // Sets up guard state for the current thread; always `None` here.
    pub unsafe fn init() -> Option<Guard> {
        None
    }
}

// Only used by the atomics primitives when the `atomics` feature is enabled.
// Each thread lazily allocates itself an ID from a global counter and caches
// it in a thread-local.
#[cfg(target_feature = "atomics")]
pub fn my_id() -> u32 {
    use crate::sync::atomic::{AtomicU32, Ordering::SeqCst};

    static NEXT_ID: AtomicU32 = AtomicU32::new(0);

    // 0 doubles as the "not yet assigned" sentinel, which is why the CAS
    // loop below always hands out ids starting at 1.
    #[thread_local]
    static mut MY_ID: u32 = 0;

    unsafe {
        // Allocate an ID on first use via a CAS loop on a global counter.
        // A CAS loop (rather than `fetch_add`) guarantees the counter can
        // never silently wrap: on overflow we abort execution outright.
        // Some recycling scheme could be added if that ever matters in
        // practice (or maybe it's irrelevant by that point!).
        if MY_ID == 0 {
            let mut observed = NEXT_ID.load(SeqCst);
            MY_ID = loop {
                let bumped = match observed.checked_add(1) {
                    Some(v) => v,
                    // Counter exhausted — abort rather than reuse an ID.
                    None => crate::arch::wasm32::unreachable(),
                };
                match NEXT_ID.compare_exchange(observed, bumped, SeqCst, SeqCst) {
                    Ok(_) => break bumped,
                    Err(actual) => observed = actual,
                }
            };
        }
        MY_ID
    }
}


Home ⌂Doc Index ◂Up ▴