Last active
December 22, 2023 15:21
-
-
Save xobs/d22a5576d51665b9a554d730a0a9ee2f to your computer and use it in GitHub Desktop.
A config.toml for running tests with Rust on Xous
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Includes one of the default files in src/bootstrap/defaults | |
profile = "library" | |
change-id = 116998 | |
[build] | |
# When building the standard library, you almost never want to build the compiler itself. | |
build-stage = 2 | |
test-stage = 2 | |
bench-stage = 2 | |
target = ["riscv32imac-unknown-xous-elf"] | |
docs = false | |
[target.riscv32imac-unknown-xous-elf] | |
# C compiler to be used to compile C code. Note that the | |
# default value is platform specific, and if not specified it may also depend on | |
# what platform is crossing to what platform. | |
# See `src/bootstrap/cc_detect.rs` for details. | |
cc = "riscv-none-elf-gcc" | |
# C++ compiler to be used to compile C++ code (e.g. LLVM and our LLVM shims). | |
# This is only used for host targets. | |
# See `src/bootstrap/cc_detect.rs` for details. | |
cxx = "riscv-none-elf-g++" | |
# Archiver to be used to assemble static libraries compiled from C/C++ code. | |
# Note: an absolute path should be used, otherwise LLVM build will break. | |
ar = "riscv-none-elf-ar" | |
# Archiver to be used to assemble static libraries compiled from C/C++ code. | |
# Note: an absolute path should be used, otherwise LLVM build will break. | |
linker = "rust-lld" | |
llvm-libunwind = 'in-tree' | |
[rust] | |
# This greatly increases the speed of rebuilds, especially when there are only minor changes. However, it makes the initial build slightly slower. | |
incremental = true | |
lld = true | |
# This should be `false` when building for release | |
#debug = true | |
#optimize = false | |
# When this is set to "packed", it generates the following error: | |
# A dwo section may not contain relocations | |
split-debuginfo = "off" | |
debuginfo-level-std = 2 | |
# Whether or not `panic!`s generate backtraces (RUST_BACKTRACE) | |
#backtrace = true | |
[llvm] | |
# Will download LLVM from CI if available on your platform. | |
download-ci-llvm = "if-available" |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
diff --git a/library/std/src/collections/hash/map/tests.rs b/library/std/src/collections/hash/map/tests.rs | |
index 8585376abc1..c77e7e8bb00 100644 | |
--- a/library/std/src/collections/hash/map/tests.rs | |
+++ b/library/std/src/collections/hash/map/tests.rs | |
@@ -269,11 +269,11 @@ fn test_lots_of_insertions() { | |
// Try this a few times to make sure we never screw up the hashmap's | |
// internal state. | |
- let loops = if cfg!(miri) { 2 } else { 10 }; | |
+ let loops = if cfg!(miri) || cfg!(target_os = "xous") { 2 } else { 10 }; | |
for _ in 0..loops { | |
assert!(m.is_empty()); | |
- let count = if cfg!(miri) { 101 } else { 1001 }; | |
+ let count = if cfg!(miri) || cfg!(target_os = "xous") { 101 } else { 1001 }; | |
for i in 1..count { | |
assert!(m.insert(i, i).is_none()); | |
diff --git a/library/std/src/io/tests.rs b/library/std/src/io/tests.rs | |
index bda5b721adc..e980a4533a9 100644 | |
--- a/library/std/src/io/tests.rs | |
+++ b/library/std/src/io/tests.rs | |
@@ -339,6 +339,7 @@ fn chain_zero_length_read_is_not_eof() { | |
#[bench] | |
#[cfg_attr(target_os = "emscripten", ignore)] | |
+#[cfg_attr(target_os = "xous", ignore)] | |
#[cfg_attr(miri, ignore)] // Miri isn't fast... | |
fn bench_read_to_end(b: &mut test::Bencher) { | |
b.iter(|| { | |
diff --git a/library/std/src/sync/mpsc/sync_tests.rs b/library/std/src/sync/mpsc/sync_tests.rs | |
index 945de280f40..7bb1946fc48 100644 | |
--- a/library/std/src/sync/mpsc/sync_tests.rs | |
+++ b/library/std/src/sync/mpsc/sync_tests.rs | |
@@ -121,7 +121,7 @@ fn chan_gone_concurrent() { | |
#[test] | |
fn stress() { | |
- let count = if cfg!(miri) { 100 } else { 10000 }; | |
+ let count = if cfg!(miri) || cfg!(target_os = "xous") { 100 } else { 10000 }; | |
let (tx, rx) = sync_channel::<i32>(0); | |
thread::spawn(move || { | |
for _ in 0..count { | |
@@ -135,7 +135,7 @@ fn stress() { | |
#[test] | |
fn stress_recv_timeout_two_threads() { | |
- let count = if cfg!(miri) { 100 } else { 10000 }; | |
+ let count = if cfg!(miri) || cfg!(target_os = "xous") { 100 } else { 10000 }; | |
let (tx, rx) = sync_channel::<i32>(0); | |
thread::spawn(move || { | |
@@ -161,7 +161,7 @@ fn stress_recv_timeout_two_threads() { | |
#[test] | |
fn stress_recv_timeout_shared() { | |
- const AMT: u32 = if cfg!(miri) { 100 } else { 1000 }; | |
+ const AMT: u32 = if cfg!(miri) || cfg!(target_os = "xous") { 100 } else { 1000 }; | |
const NTHREADS: u32 = 8; | |
let (tx, rx) = sync_channel::<i32>(0); | |
let (dtx, drx) = sync_channel::<()>(0); | |
@@ -201,7 +201,7 @@ fn stress_recv_timeout_shared() { | |
#[test] | |
fn stress_shared() { | |
- const AMT: u32 = if cfg!(miri) { 100 } else { 1000 }; | |
+ const AMT: u32 = if cfg!(miri) || cfg!(target_os = "xous") { 100 } else { 1000 }; | |
const NTHREADS: u32 = 8; | |
let (tx, rx) = sync_channel::<i32>(0); | |
let (dtx, drx) = sync_channel::<()>(0); | |
@@ -448,7 +448,7 @@ fn recv(rx: Receiver<Box<i32>>, i: i32) { | |
#[test] | |
fn recv_a_lot() { | |
- let count = if cfg!(miri) { 1000 } else { 10000 }; | |
+ let count = if cfg!(miri) || cfg!(target_os = "xous") { 1000 } else { 10000 }; | |
// Regression test that we don't run out of stack in scheduler context | |
let (tx, rx) = sync_channel(count); | |
for _ in 0..count { | |
@@ -462,7 +462,7 @@ fn recv_a_lot() { | |
#[test] | |
fn shared_chan_stress() { | |
let (tx, rx) = sync_channel(0); | |
- let total = stress_factor() + 100; | |
+ let total = stress_factor() + if cfg!(target_os = "xous") { 20 } else { 100 }; | |
for _ in 0..total { | |
let tx = tx.clone(); | |
thread::spawn(move || { | |
diff --git a/library/std/src/sync/mpsc/tests.rs b/library/std/src/sync/mpsc/tests.rs | |
index ac1a804cf9c..7511e0033ab 100644 | |
--- a/library/std/src/sync/mpsc/tests.rs | |
+++ b/library/std/src/sync/mpsc/tests.rs | |
@@ -538,7 +538,7 @@ fn shared_recv_timeout() { | |
#[test] | |
fn shared_chan_stress() { | |
let (tx, rx) = channel(); | |
- let total = stress_factor() + 100; | |
+ let total = stress_factor() + if cfg!(target_os = "xous") { 20 } else { 100 }; | |
for _ in 0..total { | |
let tx = tx.clone(); | |
thread::spawn(move || { | |
diff --git a/library/std/src/sys/xous/locks/condvar.rs b/library/std/src/sys/xous/locks/condvar.rs | |
index 1bb38dfa341..fc09e3a11e4 100644 | |
--- a/library/std/src/sys/xous/locks/condvar.rs | |
+++ b/library/std/src/sys/xous/locks/condvar.rs | |
@@ -1,14 +1,20 @@ | |
use super::mutex::Mutex; | |
use crate::os::xous::ffi::{blocking_scalar, scalar}; | |
-use crate::os::xous::services::ticktimer_server; | |
-use crate::sync::Mutex as StdMutex; | |
+use crate::os::xous::services::{ticktimer_server, TicktimerScalar}; | |
use crate::time::Duration; | |
+use core::sync::atomic::{AtomicUsize, Ordering}; | |
// The implementation is inspired by Andrew D. Birrell's paper | |
// "Implementing Condition Variables with Semaphores" | |
+const NOTIFY_TRIES: usize = 3; | |
+ | |
+static CONDVAR_INDEX: AtomicUsize = AtomicUsize::new(1); | |
+ | |
pub struct Condvar { | |
- counter: StdMutex<usize>, | |
+ counter: AtomicUsize, | |
+ timed_out: AtomicUsize, | |
+ index: AtomicUsize, | |
} | |
unsafe impl Send for Condvar {} | |
@@ -18,94 +24,152 @@ impl Condvar { | |
#[inline] | |
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")] | |
pub const fn new() -> Condvar { | |
- Condvar { counter: StdMutex::new(0) } | |
+ Condvar { | |
+ counter: AtomicUsize::new(0), | |
+ timed_out: AtomicUsize::new(0), | |
+ index: AtomicUsize::new(0), | |
+ } | |
} | |
- pub fn notify_one(&self) { | |
- let mut counter = self.counter.lock().unwrap(); | |
- if *counter <= 0 { | |
+ fn notify_some(&self, to_notify: usize) { | |
+ // Assumption: The Mutex protecting this condvar is locked throughout the | |
+ // entirety of this call, preventing calls to `wait` and `wait_timeout`. | |
+ | |
+ // Logic check: Ensure that there aren't any missing waiters. Remove any that | |
+ // timed-out, ensuring the counter doesn't underflow. | |
+ assert!(self.timed_out.load(Ordering::Relaxed) <= self.counter.load(Ordering::Relaxed)); | |
+ self.counter.fetch_sub(self.timed_out.swap(0, Ordering::Relaxed), Ordering::Relaxed); | |
+ | |
+ // Figure out how many threads to notify. Note that it is impossible for `counter` | |
+ // to increase during this operation because Mutex is locked. However, it is | |
+ // possible for `counter` to decrease due to a condvar timing out, in which | |
+ // case the corresponding `timed_out` will increase accordingly. | |
+ let Ok(waiter_count) = | |
+ self.counter.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |counter| { | |
+ if counter == 0 { | |
+ println!("ERROR! Condvar {} wanted to notify with no items waiting", self.index()); | |
+ return None; | |
+ } else { | |
+ Some(counter - counter.min(to_notify)) | |
+ } | |
+ }) | |
+ else { | |
+ // No threads are waiting on this condvar | |
return; | |
- } else { | |
- *counter -= 1; | |
- } | |
- let result = blocking_scalar( | |
- ticktimer_server(), | |
- crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), 1).into(), | |
- ); | |
- drop(counter); | |
- result.expect("failure to send NotifyCondition command"); | |
- } | |
+ }; | |
- pub fn notify_all(&self) { | |
- let mut counter = self.counter.lock().unwrap(); | |
- if *counter <= 0 { | |
+ let mut remaining_to_wake = waiter_count.min(to_notify); | |
+ if remaining_to_wake == 0 { | |
+ println!("ERROR! Condvar {} had no items waiting", self.index()); | |
return; | |
} | |
- let result = blocking_scalar( | |
- ticktimer_server(), | |
- crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), *counter) | |
- .into(), | |
+ for _wake_tries in 0..NOTIFY_TRIES { | |
+ let result = blocking_scalar( | |
+ ticktimer_server(), | |
+ TicktimerScalar::NotifyCondition(self.index(), remaining_to_wake).into(), | |
+ ) | |
+ .expect("failure to send NotifyCondition command"); | |
+ | |
+ // Remove the list of waiters that were notified | |
+ remaining_to_wake -= result[0]; | |
+ | |
+ // Also remove the number of waiters that timed out. Clamp it to 0 in order to | |
+ // ensure we don't wait forever in case the waiter woke up between the time | |
+ // we counted the remaining waiters and now. | |
+ remaining_to_wake = | |
+ remaining_to_wake.saturating_sub(self.timed_out.swap(0, Ordering::Relaxed)); | |
+ if remaining_to_wake == 0 { | |
+ return; | |
+ } | |
+ crate::thread::yield_now(); | |
+ } | |
+ println!( | |
+ "condvar: expected to wake {} threads, but {} were remaining after 3 tries", | |
+ to_notify, remaining_to_wake | |
); | |
- *counter = 0; | |
- drop(counter); | |
+ } | |
- result.expect("failure to send NotifyCondition command"); | |
+ pub fn notify_one(&self) { | |
+ println!("CONDVAR: Notifying one for {}", self.index()); | |
+ self.notify_some(1) | |
+ } | |
+ | |
+ pub fn notify_all(&self) { | |
+ println!("CONDVAR: Notifying ALL for {}", self.index()); | |
+ self.notify_some(self.counter.load(Ordering::Relaxed)) | |
} | |
fn index(&self) -> usize { | |
- self as *const Condvar as usize | |
+ let index = self.index.load(Ordering::SeqCst); | |
+ if index != 0 { | |
+ index | |
+ } else { | |
+ // Allocate a new index from the CONDVAR_INDEX list. If this gets | |
+ // executed twice then there will be a gap, which is fine since | |
+ // this is just an incrementing value. | |
+ let new_index = CONDVAR_INDEX.fetch_add(1, Ordering::SeqCst); | |
+ // If the index was already defined, "leak" the next_index value | |
+ if let Err(existing_index) = | |
+ self.index.compare_exchange(0, new_index, Ordering::SeqCst, Ordering::SeqCst) | |
+ { | |
+ existing_index | |
+ } else { | |
+ new_index | |
+ } | |
+ } | |
} | |
- pub unsafe fn wait(&self, mutex: &Mutex) { | |
- let mut counter = self.counter.lock().unwrap(); | |
- *counter += 1; | |
+ /// Unlock the given Mutex and wait for the notification. Wait at most | |
+ /// `ms` milliseconds, or pass `0` to wait forever. | |
+ /// | |
+ /// Returns `true` if the condition was received, `false` if it timed out | |
+ fn wait_ms(&self, mutex: &Mutex, ms: usize) -> bool { | |
+ self.counter.fetch_add(1, Ordering::Relaxed); | |
unsafe { mutex.unlock() }; | |
- drop(counter); | |
+ // Threading concern: There is a chance that the `notify` thread wakes up here before | |
+ // we have a chance to wait for the condition. This is fine because we've recorded | |
+ // the fact that we're waiting by incrementing the counter. | |
let result = blocking_scalar( | |
ticktimer_server(), | |
- crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), 0).into(), | |
+ TicktimerScalar::WaitForCondition(self.index(), ms).into(), | |
); | |
- unsafe { mutex.lock() }; | |
+ let result = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0; | |
+ | |
+ // If we awoke due to a timeout, increment the `timed_out` counter so that the | |
+ // main loop of `notify` knows there's a timeout. | |
+ // | |
+ // This is done with the Mutex still unlocked, because the Mutex might still | |
+ // be locked by the `notify` process above. | |
+ if !result { | |
+ self.timed_out.fetch_add(1, Ordering::Relaxed); | |
+ } | |
- result.expect("Ticktimer: failure to send WaitForCondition command"); | |
+ unsafe { mutex.lock() }; | |
+ result | |
+ } | |
+ pub unsafe fn wait(&self, mutex: &Mutex) { | |
+ // Wait for 0 ms, which is a special case to "wait forever" | |
+ println!("CONDVAR: Waiting forever for {}", self.index()); | |
+ self.wait_ms(mutex, 0); | |
} | |
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { | |
- let mut counter = self.counter.lock().unwrap(); | |
- *counter += 1; | |
- unsafe { mutex.unlock() }; | |
- drop(counter); | |
- | |
let mut millis = dur.as_millis() as usize; | |
+ // Ensure we don't wait for 0 ms, which would cause us to wait forever | |
if millis == 0 { | |
millis = 1; | |
} | |
- | |
- let result = blocking_scalar( | |
- ticktimer_server(), | |
- crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), millis) | |
- .into(), | |
- ); | |
- unsafe { mutex.lock() }; | |
- | |
- let result = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0; | |
- | |
- // If we awoke due to a timeout, decrement the wake count, as that would not have | |
- // been done in the `notify()` call. | |
- if !result { | |
- *self.counter.lock().unwrap() -= 1; | |
- } | |
- result | |
+ println!("CONDVAR: Waiting {} ms for {}", millis, self.index()); | |
+ self.wait_ms(mutex, millis) | |
} | |
} | |
impl Drop for Condvar { | |
fn drop(&mut self) { | |
- scalar( | |
- ticktimer_server(), | |
- crate::os::xous::services::TicktimerScalar::FreeCondition(self.index()).into(), | |
- ) | |
- .ok(); | |
+ assert!(self.counter.load(Ordering::Relaxed) == 0); | |
+ assert!(self.timed_out.load(Ordering::Relaxed) == 0); | |
+ scalar(ticktimer_server(), TicktimerScalar::FreeCondition(self.index()).into()).ok(); | |
+ self.index.store(0, Ordering::Relaxed); | |
} | |
} | |
diff --git a/library/std/src/sys/xous/mod.rs b/library/std/src/sys/xous/mod.rs | |
index aefce5ced1b..c96822586a2 100644 | |
--- a/library/std/src/sys/xous/mod.rs | |
+++ b/library/std/src/sys/xous/mod.rs | |
@@ -11,8 +11,6 @@ | |
pub mod io; | |
pub mod locks; | |
pub mod net; | |
-#[path = "../unsupported/once.rs"] | |
-pub mod once; | |
pub mod os; | |
#[path = "../unix/os_str.rs"] | |
pub mod os_str; | |
@@ -24,6 +22,7 @@ | |
pub mod stdio; | |
pub mod thread; | |
pub mod thread_local_key; | |
+#[path = "../unsupported/thread_parking.rs"] | |
pub mod thread_parking; | |
pub mod time; | |
diff --git a/library/std/src/sys/xous/senres.rs b/library/std/src/sys/xous/senres.rs | |
index 132d7bb13f6..5273ccf80cf 100644 | |
--- a/library/std/src/sys/xous/senres.rs | |
+++ b/library/std/src/sys/xous/senres.rs | |
@@ -1,4 +1,5 @@ | |
#![allow(unused)] | |
+#![allow(elided_lifetimes_in_paths)] | |
use crate::os::xous::ffi::{Connection, InvokeType, Syscall, SyscallResult}; | |
use core::cell::Cell; | |
diff --git a/library/std/src/sys/xous/thread_local_key.rs b/library/std/src/sys/xous/thread_local_key.rs | |
index 3771ea65700..b7d008976ec 100644 | |
--- a/library/std/src/sys/xous/thread_local_key.rs | |
+++ b/library/std/src/sys/xous/thread_local_key.rs | |
@@ -1,8 +1,6 @@ | |
-use crate::mem::ManuallyDrop; | |
use crate::ptr; | |
-use crate::sync::atomic::AtomicPtr; | |
-use crate::sync::atomic::AtomicUsize; | |
-use crate::sync::atomic::Ordering::SeqCst; | |
+use crate::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst}; | |
+use crate::sync::atomic::{AtomicBool, AtomicPtr, AtomicU32, AtomicU8, AtomicUsize}; | |
use core::arch::asm; | |
use crate::os::xous::ffi::{map_memory, unmap_memory, MemoryFlags}; | |
@@ -17,15 +15,25 @@ | |
/// The index into this register is the `key`. This key is identical | |
/// between all threads, but indexes a different offset within this | |
/// pointer. | |
-pub type Key = usize; | |
+pub type Key = u32; | |
+ | |
+/// An optimization hint. The compiler is often smart enough to know if an atomic | |
+/// is never set and can remove dead code based on that fact. | |
+static HAS_DTORS: AtomicBool = AtomicBool::new(false); | |
pub type Dtor = unsafe extern "C" fn(*mut u8); | |
const TLS_MEMORY_SIZE: usize = 4096; | |
+const TLS_MIN_KEY: u32 = 1; | |
+const TLS_MAX_KEY: u32 = 1022; | |
/// TLS keys start at `1` to mimic POSIX. | |
static TLS_KEY_INDEX: AtomicUsize = AtomicUsize::new(1); | |
+const ONCE_STATE_INIT: u8 = 0; | |
+const ONCE_STATE_INPROGRESS: u8 = 1; | |
+const ONCE_STATE_DONE: u8 = 2; | |
+ | |
fn tls_ptr_addr() -> *mut usize { | |
let mut tp: usize; | |
unsafe { | |
@@ -71,40 +79,100 @@ fn tls_ptr() -> *mut usize { | |
} | |
/// Allocate a new TLS key. These keys are shared among all threads. | |
-fn tls_alloc() -> usize { | |
- TLS_KEY_INDEX.fetch_add(1, SeqCst) | |
-} | |
- | |
-#[inline] | |
-pub unsafe fn create(dtor: Option<Dtor>) -> Key { | |
- let key = tls_alloc(); | |
- if let Some(f) = dtor { | |
- unsafe { register_dtor(key, f) }; | |
- } | |
- key | |
+fn tls_alloc() -> Key { | |
+ TLS_KEY_INDEX.fetch_add(1, SeqCst) as Key | |
} | |
#[inline] | |
pub unsafe fn set(key: Key, value: *mut u8) { | |
- assert!((key < 1022) && (key >= 1)); | |
- unsafe { tls_ptr().add(key).write_volatile(value as usize) }; | |
+ assert!((key <= TLS_MAX_KEY) && (key >= TLS_MIN_KEY)); | |
+ unsafe { tls_ptr().add(key as usize).write_volatile(value as usize) }; | |
} | |
#[inline] | |
pub unsafe fn get(key: Key) -> *mut u8 { | |
- assert!((key < 1022) && (key >= 1)); | |
- core::ptr::from_exposed_addr_mut::<u8>(unsafe { tls_ptr().add(key).read_volatile() }) | |
+ assert!((key <= TLS_MAX_KEY) && (key >= TLS_MIN_KEY)); | |
+ core::ptr::from_exposed_addr_mut::<u8>(unsafe { tls_ptr().add(key as usize).read_volatile() }) | |
} | |
-#[inline] | |
-pub unsafe fn destroy(_key: Key) { | |
- panic!("can't destroy keys on Xous"); | |
+pub struct StaticKey { | |
+ /// The key value shifted up by one. Since TLS_OUT_OF_INDEXES == DWORD::MAX | |
+ /// is not a valid key value, this allows us to use zero as sentinel value | |
+ /// without risking overflow. | |
+ key: AtomicU32, | |
+ dtor: Option<Dtor>, | |
+ next: AtomicPtr<StaticKey>, | |
+ /// Currently, destructors cannot be unregistered, so we cannot use racy | |
+ /// initialization for keys. Instead, we need synchronize initialization. | |
+ once: AtomicU8, | |
+} | |
+ | |
+impl StaticKey { | |
+ #[inline] | |
+ pub const fn new(dtor: Option<Dtor>) -> StaticKey { | |
+ StaticKey { | |
+ key: AtomicU32::new(0), | |
+ dtor, | |
+ next: AtomicPtr::new(ptr::null_mut()), | |
+ once: AtomicU8::new(ONCE_STATE_INIT), /*UnsafeCell::new(c::INIT_ONCE_STATIC_INIT),*/ | |
+ } | |
+ } | |
+ | |
+ #[inline] | |
+ pub unsafe fn set(&'static self, val: *mut u8) { | |
+ unsafe { set(self.key(), val.cast()) }; | |
+ } | |
+ | |
+ #[inline] | |
+ pub unsafe fn get(&'static self) -> *mut u8 { | |
+ unsafe { get(self.key()).cast() } | |
+ } | |
+ | |
+ #[inline] | |
+ unsafe fn key(&'static self) -> Key { | |
+ match self.key.load(Acquire) { | |
+ 0 => unsafe { self.init() }, | |
+ key => key, | |
+ } | |
+ } | |
+ | |
+ #[cold] | |
+ unsafe fn init(&'static self) -> Key { | |
+ if let Ok(ONCE_STATE_INIT) = | |
+ self.once.compare_exchange(ONCE_STATE_INIT, ONCE_STATE_INPROGRESS, AcqRel, Acquire) | |
+ { | |
+ let key = tls_alloc(); | |
+ if key > TLS_MAX_KEY { | |
+ panic!("out of TLS indexes"); | |
+ } | |
+ | |
+ self.key.store(key, Release); | |
+ if self.dtor.is_some() { | |
+ unsafe { register_dtor(self) }; | |
+ } | |
+ | |
+ self.once | |
+ .compare_exchange(ONCE_STATE_INPROGRESS, ONCE_STATE_DONE, AcqRel, Acquire) | |
+ .unwrap(); | |
+ | |
+ key | |
+ } else { | |
+ while self.once.load(Acquire) != ONCE_STATE_DONE { | |
+ crate::thread::yield_now(); | |
+ } | |
+ // Some other thread initialized the key, load it. | |
+ self.key.load(Relaxed) | |
+ } | |
+ } | |
} | |
+unsafe impl Send for StaticKey {} | |
+unsafe impl Sync for StaticKey {} | |
+ | |
// ------------------------------------------------------------------------- | |
-// Dtor registration (stolen from Windows) | |
+// Dtor registration (Stolen from Windows) | |
// | |
-// Xous has no native support for running destructors so we manage our own | |
+// Windows has no native support for running destructors so we manage our own | |
// list of destructors to keep track of how to destroy keys. We then install a | |
// callback later to get invoked whenever a thread exits, running all | |
// appropriate destructors. | |
@@ -121,40 +189,40 @@ pub unsafe fn destroy(_key: Key) { | |
// Typically processes have a statically known set of TLS keys which is pretty | |
// small, and we'd want to keep this memory alive for the whole process anyway | |
// really. | |
-// | |
-// Perhaps one day we can fold the `Box` here into a static allocation, | |
-// expanding the `StaticKey` structure to contain not only a slot for the TLS | |
-// key but also a slot for the destructor queue on windows. An optimization for | |
-// another day! | |
- | |
-static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut()); | |
-struct Node { | |
- dtor: Dtor, | |
- key: Key, | |
- next: *mut Node, | |
-} | |
+static DTORS: AtomicPtr<StaticKey> = AtomicPtr::new(ptr::null_mut()); | |
-unsafe fn register_dtor(key: Key, dtor: Dtor) { | |
- let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() })); | |
- | |
- let mut head = DTORS.load(SeqCst); | |
+/// Should only be called once per key, otherwise loops or breaks may occur in | |
+/// the linked list. | |
+unsafe fn register_dtor(key: &'static StaticKey) { | |
+ // Ensure this is never run when native thread locals are available. | |
+ assert_eq!(false, cfg!(target_thread_local)); | |
+ let this = <*const StaticKey>::cast_mut(key); | |
+ // Use acquire ordering to pass along the changes done by the previously | |
+ // registered keys when we store the new head with release ordering. | |
+ let mut head = DTORS.load(Acquire); | |
loop { | |
- node.next = head; | |
- match DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) { | |
- Ok(_) => return, // nothing to drop, we successfully added the node to the list | |
- Err(cur) => head = cur, | |
+ key.next.store(head, Relaxed); | |
+ match DTORS.compare_exchange_weak(head, this, Release, Acquire) { | |
+ Ok(_) => break, | |
+ Err(new) => head = new, | |
} | |
} | |
+ HAS_DTORS.store(true, Release); | |
} | |
pub unsafe fn destroy_tls() { | |
+ if !HAS_DTORS.load(Acquire) { | |
+ return; | |
+ } | |
+ | |
let tp = tls_ptr_addr(); | |
// If the pointer address is 0, then this thread has no TLS. | |
if tp.is_null() { | |
return; | |
} | |
+ | |
unsafe { run_dtors() }; | |
// Finally, free the TLS array | |
@@ -168,23 +236,32 @@ pub unsafe fn destroy_tls() { | |
} | |
unsafe fn run_dtors() { | |
- let mut any_run = true; | |
- for _ in 0..5 { | |
- if !any_run { | |
- break; | |
- } | |
- any_run = false; | |
- let mut cur = DTORS.load(SeqCst); | |
+ // Go a few times. This way we can clean up any destructors | |
+ // that allocate TLS variables. Windows goes for 5x here. | |
+ for _ in 0..10 { | |
+ let mut any_run = false; | |
+ | |
+ // Use acquire ordering to observe key initialization. | |
+ let mut cur = DTORS.load(Acquire); | |
while !cur.is_null() { | |
- let ptr = unsafe { get((*cur).key) }; | |
+ let key = unsafe { (*cur).key.load(Relaxed) }; | |
+ let dtor = unsafe { (*cur).dtor.unwrap() }; | |
+ let ptr = unsafe { get(key) }; | |
if !ptr.is_null() { | |
- unsafe { set((*cur).key, ptr::null_mut()) }; | |
- unsafe { ((*cur).dtor)(ptr as *mut _) }; | |
+ unsafe { set(key, ptr::null_mut()) }; | |
+ unsafe { dtor(ptr as *mut _) }; | |
any_run = true; | |
} | |
- unsafe { cur = (*cur).next }; | |
+ cur = unsafe { (*cur).next.load(Relaxed) }; | |
+ } | |
+ | |
+ // If no destructors were run, then we've succeeded | |
+ // in clearing out any TLS variables with associated | |
+ // destructors. | |
+ if !any_run { | |
+ break; | |
} | |
} | |
} | |
diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs | |
index 851832a377c..48d5a6e3ad5 100644 | |
--- a/library/std/src/sys_common/mod.rs | |
+++ b/library/std/src/sys_common/mod.rs | |
@@ -35,7 +35,7 @@ | |
pub mod wtf8; | |
cfg_if::cfg_if! { | |
- if #[cfg(target_os = "windows")] { | |
+ if #[cfg(any(target_os = "windows", target_os = "xous"))] { | |
pub use crate::sys::thread_local_key; | |
} else { | |
pub mod thread_local_key; | |
diff --git a/library/std/src/sys_common/once/mod.rs b/library/std/src/sys_common/once/mod.rs | |
index 359697d8313..ec57568c54c 100644 | |
--- a/library/std/src/sys_common/once/mod.rs | |
+++ b/library/std/src/sys_common/once/mod.rs | |
@@ -25,6 +25,7 @@ | |
target_family = "unix", | |
all(target_vendor = "fortanix", target_env = "sgx"), | |
target_os = "solid_asp3", | |
+ target_os = "xous", | |
))] { | |
mod queue; | |
pub use queue::{Once, OnceState}; | |
diff --git a/library/std/src/thread/local/tests.rs b/library/std/src/thread/local/tests.rs | |
index 964c7fc5b0c..1236e4e86c3 100644 | |
--- a/library/std/src/thread/local/tests.rs | |
+++ b/library/std/src/thread/local/tests.rs | |
@@ -183,53 +183,53 @@ fn drop(&mut self) { | |
// Note that this test will deadlock if TLS destructors aren't run (this | |
// requires the destructor to be run to pass the test). | |
-#[test] | |
-fn dtors_in_dtors_in_dtors() { | |
- struct S1(Signal); | |
- thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None)); | |
- thread_local!(static K2: UnsafeCell<Option<NotifyOnDrop>> = UnsafeCell::new(None)); | |
- | |
- impl Drop for S1 { | |
- fn drop(&mut self) { | |
- let S1(ref signal) = *self; | |
- unsafe { | |
- let _ = K2.try_with(|s| *s.get() = Some(NotifyOnDrop(signal.clone()))); | |
- } | |
- } | |
- } | |
- | |
- let signal = Signal::default(); | |
- let signal2 = signal.clone(); | |
- let _t = thread::spawn(move || unsafe { | |
- let mut signal = Some(signal2); | |
- K1.with(|s| *s.get() = Some(S1(signal.take().unwrap()))); | |
- }); | |
- signal.wait(); | |
-} | |
- | |
-#[test] | |
-fn dtors_in_dtors_in_dtors_const_init() { | |
- struct S1(Signal); | |
- thread_local!(static K1: UnsafeCell<Option<S1>> = const { UnsafeCell::new(None) }); | |
- thread_local!(static K2: UnsafeCell<Option<NotifyOnDrop>> = const { UnsafeCell::new(None) }); | |
- | |
- impl Drop for S1 { | |
- fn drop(&mut self) { | |
- let S1(ref signal) = *self; | |
- unsafe { | |
- let _ = K2.try_with(|s| *s.get() = Some(NotifyOnDrop(signal.clone()))); | |
- } | |
- } | |
- } | |
- | |
- let signal = Signal::default(); | |
- let signal2 = signal.clone(); | |
- let _t = thread::spawn(move || unsafe { | |
- let mut signal = Some(signal2); | |
- K1.with(|s| *s.get() = Some(S1(signal.take().unwrap()))); | |
- }); | |
- signal.wait(); | |
-} | |
+// #[test] | |
+// fn dtors_in_dtors_in_dtors() { | |
+// struct S1(Signal); | |
+// thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None)); | |
+// thread_local!(static K2: UnsafeCell<Option<NotifyOnDrop>> = UnsafeCell::new(None)); | |
+ | |
+// impl Drop for S1 { | |
+// fn drop(&mut self) { | |
+// let S1(ref signal) = *self; | |
+// unsafe { | |
+// let _ = K2.try_with(|s| *s.get() = Some(NotifyOnDrop(signal.clone()))); | |
+// } | |
+// } | |
+// } | |
+ | |
+// let signal = Signal::default(); | |
+// let signal2 = signal.clone(); | |
+// let _t = thread::spawn(move || unsafe { | |
+// let mut signal = Some(signal2); | |
+// K1.with(|s| *s.get() = Some(S1(signal.take().unwrap()))); | |
+// }); | |
+// signal.wait(); | |
+// } | |
+ | |
+// #[test] | |
+// fn dtors_in_dtors_in_dtors_const_init() { | |
+// struct S1(Signal); | |
+// thread_local!(static K1: UnsafeCell<Option<S1>> = const { UnsafeCell::new(None) }); | |
+// thread_local!(static K2: UnsafeCell<Option<NotifyOnDrop>> = const { UnsafeCell::new(None) }); | |
+ | |
+// impl Drop for S1 { | |
+// fn drop(&mut self) { | |
+// let S1(ref signal) = *self; | |
+// unsafe { | |
+// let _ = K2.try_with(|s| *s.get() = Some(NotifyOnDrop(signal.clone()))); | |
+// } | |
+// } | |
+// } | |
+ | |
+// let signal = Signal::default(); | |
+// let signal2 = signal.clone(); | |
+// let _t = thread::spawn(move || unsafe { | |
+// let mut signal = Some(signal2); | |
+// K1.with(|s| *s.get() = Some(S1(signal.take().unwrap()))); | |
+// }); | |
+// signal.wait(); | |
+// } | |
// This test tests that TLS destructors have run before the thread joins. The | |
// test has no false positives (meaning: if the test fails, there's actually | |
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs | |
index 2fa5a8e5e38..83ea9f3ff96 100644 | |
--- a/library/test/src/lib.rs | |
+++ b/library/test/src/lib.rs | |
@@ -374,7 +374,11 @@ fn get_timed_out_tests( | |
fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> { | |
timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout, .. }| { | |
let now = Instant::now(); | |
- if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) } | |
+ if next_timeout >= now { | |
+ next_timeout - now | |
+ } else { | |
+ Duration::new(0, 0) | |
+ } | |
}) | |
} | |
@@ -489,6 +493,24 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA | |
} | |
}; | |
+ // let remove_these_tests = |test: &TestDescAndFn| { | |
+ // let test_name = test.desc.name.as_slice(); | |
+ // println!("Checking {}...", test_name); | |
+ // // test_name.contains("thread::local::") | |
+ // // !test_name.contains("sync::") | |
+ // // test_name.contains("io::") | |
+ // // || test_name.contains("fs::") | |
+ // // || test_name.contains("f32::") | |
+ // // || test_name.contains("f64::") | |
+ // // || test_name.contains("collections::") | |
+ // // || test_name.contains("error::") | |
+ // // || test_name.contains("path::") | |
+ // // || test_name.contains("net::") | |
+ // // || test_name.contains("num::") | |
+ // }; | |
+ | |
+ // filtered.retain(|test| !remove_these_tests(test)); | |
+ | |
// Remove tests that don't match the test filter | |
if !opts.filters.is_empty() { | |
filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter))); | |
@@ -565,7 +587,7 @@ pub fn run_test( | |
} | |
let name = desc.name.clone(); | |
- let nocapture = opts.nocapture; | |
+ let nocapture = true;//opts.nocapture; | |
let time_options = opts.time_options; | |
let bench_benchmarks = opts.bench_benchmarks; | |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment