Add the following intrinsics to `core::arch::x86_64`:
/// (Module documentation)
///
/// Semantics of volatile memory operations: volatile memory operations (reads and writes)
/// must be emitted by the compiler and cannot be re-ordered across other volatile operations.
///
/// The result of a volatile read is frozen (`freeze` is applied to it). That is,
/// reading uninitialized memory via a volatile read never returns uninitialized memory.
///
/// When volatile reads and writes participate in data-races with other volatile
/// operations, the value read or written is indeterminate. A race between:
///
/// * a volatile atomic read and volatile atomic writes reads the contents of memory either
///   before or after any of the writes - because the reads and the writes are atomic, the
///   read never observes a partial modification
/// * a volatile atomic write and other volatile atomic writes results in the contents of
///   exactly one of the writes being stored to memory - the memory never contains a
///   partial mix of the different writes
///
/// Non-atomic volatile operations perform the read or write as a sequence of smaller
/// volatile atomic reads or writes. Data races can then result in partial results being
/// read from or written to memory.
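
// Illustrative sketch (not part of the proposed API): under the semantics above, a
// non-atomic 128-bit volatile load could decompose into two 64-bit volatile loads,
// roughly as below. The actual split and its order are unspecified; this sketch assumes
// a little-endian layout. A racing volatile atomic write that lands between the two
// loads yields exactly the "partial result" described above.
unsafe fn example_decomposed_load_u128(x: *const u128) -> u128 {
    let p = x as *const u64;
    let lo = volatile_load_u64_unaligned(p) as u128;
    let hi = volatile_load_u64_unaligned(p.add(1)) as u128;
    (hi << 64) | lo
}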
/// Volatile atomic 8-bit load
///
/// 8-bit volatile atomic load from `x`.
///
/// If the load introduces a data-race, the result is indeterminate.
unsafe fn volatile_atomic_load_u8(x: *const u8) -> u8;
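
// Illustrative use (hypothetical MMIO address): busy-wait on a device status byte. The
// volatile semantics guarantee that every loop iteration performs a real one-byte read
// the compiler cannot coalesce or hoist out of the loop.
unsafe fn example_wait_ready(status: *const u8) {
    while volatile_atomic_load_u8(status) & 0x1 == 0 {
        core::hint::spin_loop();
    }
}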
/// Volatile aligned atomic load u16/u32/u64
///
/// 16/32/64-bit volatile atomic load from aligned `x`.
///
/// If the load introduces a data-race, the result is indeterminate.
///
/// If `x` is not aligned, the behavior is undefined.
unsafe fn volatile_atomic_load_u16(x: *const u16) -> u16;
unsafe fn volatile_atomic_load_u32(x: *const u32) -> u32;
unsafe fn volatile_atomic_load_u64(x: *const u64) -> u64;
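
// Illustrative use: read a 32-bit device register. The pointer must be 4-byte aligned;
// the debug assertion documents the precondition but does not remove the undefined
// behavior when it is violated.
unsafe fn example_read_reg32(reg: *const u32) -> u32 {
    debug_assert!(reg as usize % core::mem::align_of::<u32>() == 0);
    volatile_atomic_load_u32(reg)
}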
/// Volatile unaligned load u16/u32/u64/u128/256/512
///
/// Volatile 16/32/64/128/256/512-bit unaligned load.
///
/// This operation is not necessarily a single atomic load. The memory is read in a
/// data-race-free way by performing either a single volatile atomic load or multiple
/// smaller volatile atomic loads in an unspecified order.
///
/// If the load introduces a data-race, the result is indeterminate.
unsafe fn volatile_load_u16_unaligned(x: *const u16) -> u16;
unsafe fn volatile_load_u32_unaligned(x: *const u32) -> u32;
unsafe fn volatile_load_u64_unaligned(x: *const u64) -> u64;
#[target_feature(enable = "sse")]
unsafe fn volatile_load_u128_unaligned(x: *const u128) -> u128;
#[target_feature(enable = "avx")]
unsafe fn volatile_load_256_unaligned(x: *const [u8; 32]) -> [u8; 32];
#[target_feature(enable = "avx512")]
unsafe fn volatile_load_512_unaligned(x: *const [u8; 64]) -> [u8; 64];
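
// Illustrative use: read a u64 at an arbitrary byte offset of a shared buffer. No
// alignment is required, but the load may be split into several smaller atomic loads,
// so a concurrent volatile writer can make the result a torn (partial) value.
unsafe fn example_read_unaligned_u64(buf: *const u8, offset: usize) -> u64 {
    volatile_load_u64_unaligned(buf.add(offset) as *const u64)
}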
/// Volatile atomic 8-bit write
///
/// 8-bit volatile atomic write of `x` to `ptr`.
///
/// If there is a data-race with another volatile atomic write to `ptr`,
/// the memory written to `ptr` is indeterminate.
unsafe fn volatile_atomic_write_u8(ptr: *mut u8, x: u8);
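
// Illustrative use (hypothetical device command port): a one-byte volatile store that
// the compiler must emit exactly once, in order with other volatile operations.
unsafe fn example_send_command(port: *mut u8, cmd: u8) {
    volatile_atomic_write_u8(port, cmd);
}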
/// Volatile aligned atomic write u16/u32/u64
///
/// 16/32/64-bit volatile atomic write of `x` to `ptr`.
///
/// If there is a data-race with another volatile atomic write to `ptr`,
/// the memory written to `ptr` is indeterminate.
///
/// If `ptr` is not aligned, the behavior is undefined.
unsafe fn volatile_atomic_write_u16(ptr: *mut u16, x: u16);
unsafe fn volatile_atomic_write_u32(ptr: *mut u32, x: u32);
unsafe fn volatile_atomic_write_u64(ptr: *mut u64, x: u64);
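
// Illustrative use: acknowledge an interrupt by storing to a 4-byte-aligned 32-bit
// device register. A concurrent volatile atomic writer may race, but memory always ends
// up holding one of the writes in full, never a mix of both.
unsafe fn example_ack_irq(reg: *mut u32, mask: u32) {
    volatile_atomic_write_u32(reg, mask);
}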
/// Volatile unaligned write u16/u32/u64/u128/256/512
///
/// Volatile 16/32/64/128/256/512-bit unaligned write of `x` to `ptr`.
///
/// This operation is not necessarily a single atomic write. The memory is written to `ptr`
/// in a data-race-free way by performing either a single volatile atomic write or multiple
/// smaller volatile atomic writes in an unspecified order.
///
/// If there is a data-race with another volatile write to `ptr`,
/// the memory written to `ptr` is indeterminate.
unsafe fn volatile_write_u16_unaligned(ptr: *mut u16, x: u16);
unsafe fn volatile_write_u32_unaligned(ptr: *mut u32, x: u32);
unsafe fn volatile_write_u64_unaligned(ptr: *mut u64, x: u64);
#[target_feature(enable = "sse")]
unsafe fn volatile_write_u128_unaligned(ptr: *mut u128, x: u128);
#[target_feature(enable = "avx")]
unsafe fn volatile_write_256_unaligned(ptr: *mut [u8; 32], x: [u8; 32]);
#[target_feature(enable = "avx512")]
unsafe fn volatile_write_512_unaligned(ptr: *mut [u8; 64], x: [u8; 64]);
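
// Illustrative use: clear a 32-byte region with a single 256-bit volatile store when AVX
// is available. The function name and scenario are hypothetical; without AVX the same
// effect requires multiple smaller volatile writes, with the tearing caveats above.
#[target_feature(enable = "avx")]
unsafe fn example_clear_block(dst: *mut [u8; 32]) {
    volatile_write_256_unaligned(dst, [0u8; 32]);
}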