Update a bunch of library types for MCP807 #135236

Draft — wants to merge 1 commit into base: master
1 change: 1 addition & 0 deletions library/alloc/src/lib.rs
@@ -142,6 +142,7 @@
#![feature(slice_range)]
#![feature(std_internals)]
#![feature(str_internals)]
#![feature(temporary_niche_types)]
#![feature(trusted_fused)]
#![feature(trusted_len)]
#![feature(trusted_random_access)]
32 changes: 19 additions & 13 deletions library/alloc/src/raw_vec.rs
@@ -2,6 +2,7 @@

use core::marker::PhantomData;
use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::num::niche_types::UsizeNoHighBit;
use core::ptr::{self, NonNull, Unique};
use core::{cmp, hint};

@@ -34,19 +35,20 @@ enum AllocInit {
}

#[repr(transparent)]
#[cfg_attr(target_pointer_width = "16", rustc_layout_scalar_valid_range_end(0x7fff))]
#[cfg_attr(target_pointer_width = "32", rustc_layout_scalar_valid_range_end(0x7fff_ffff))]
#[cfg_attr(target_pointer_width = "64", rustc_layout_scalar_valid_range_end(0x7fff_ffff_ffff_ffff))]
struct Cap(usize);
struct Cap(UsizeNoHighBit);

impl Cap {
const ZERO: Cap = unsafe { Cap(0) };
const ZERO: Cap = Cap(unsafe { UsizeNoHighBit::new_unchecked(0) });

/// `Cap(cap)`, except if `T` is a ZST then `Cap::ZERO`.
///
/// # Safety: cap must be <= `isize::MAX`.
unsafe fn new<T>(cap: usize) -> Self {
if T::IS_ZST { Cap::ZERO } else { unsafe { Self(cap) } }
if T::IS_ZST { Cap::ZERO } else { Self(unsafe { UsizeNoHighBit::new_unchecked(cap) }) }
}

const fn as_usize(&self) -> usize {
self.0.as_inner()
}
}
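
The switch from attributes written directly on `Cap` to the shared `UsizeNoHighBit` wrapper keeps the same capacity niche as before: values above `isize::MAX` are impossible, so the compiler can reuse that forbidden range for enum layout. A small stable-Rust sanity check of the general niche mechanism, using `NonZeroUsize` as an analogue (the real `Cap`/`UsizeNoHighBit` are internal to the standard library):

```rust
use std::mem::size_of;
use std::num::NonZeroUsize;

fn main() {
    // A forbidden value range gives the compiler a "niche": `Option` can encode
    // `None` in an impossible bit pattern instead of adding a discriminant.
    assert_eq!(size_of::<Option<NonZeroUsize>>(), size_of::<usize>());
    // `UsizeNoHighBit` plays the same role for `Cap`, forbidding capacities
    // above `isize::MAX` so layouts around `RawVec` can reuse that range.
}
```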

@@ -483,7 +485,11 @@ impl<A: Allocator> RawVecInner<A> {
// Allocators currently return a `NonNull<[u8]>` whose length
// matches the size requested. If that ever changes, the capacity
// here should change to `ptr.len() / mem::size_of::<T>()`.
Ok(Self { ptr: Unique::from(ptr.cast()), cap: unsafe { Cap(capacity) }, alloc })
Ok(Self {
ptr: Unique::from(ptr.cast()),
cap: Cap(unsafe { UsizeNoHighBit::new_unchecked(capacity) }),
alloc,
})
}

#[inline]
@@ -508,7 +514,7 @@ impl<A: Allocator> RawVecInner<A> {

#[inline]
const fn capacity(&self, elem_size: usize) -> usize {
if elem_size == 0 { usize::MAX } else { self.cap.0 }
if elem_size == 0 { usize::MAX } else { self.cap.as_usize() }
}

#[inline]
@@ -518,15 +524,15 @@ impl<A: Allocator> RawVecInner<A> {

#[inline]
fn current_memory(&self, elem_layout: Layout) -> Option<(NonNull<u8>, Layout)> {
if elem_layout.size() == 0 || self.cap.0 == 0 {
if elem_layout.size() == 0 || self.cap.as_usize() == 0 {
None
} else {
// We could use Layout::array here which ensures the absence of isize and usize overflows
// and could hypothetically handle differences between stride and size, but this memory
// has already been allocated so we know it can't overflow and currently Rust does not
// support such types. So we can do better by skipping some checks and avoid an unwrap.
unsafe {
let alloc_size = elem_layout.size().unchecked_mul(self.cap.0);
let alloc_size = elem_layout.size().unchecked_mul(self.cap.as_usize());
let layout = Layout::from_size_align_unchecked(alloc_size, elem_layout.align());
Some((self.ptr.into(), layout))
}
@@ -562,7 +568,7 @@ impl<A: Allocator> RawVecInner<A> {
#[inline]
#[track_caller]
fn grow_one(&mut self, elem_layout: Layout) {
if let Err(err) = self.grow_amortized(self.cap.0, 1, elem_layout) {
if let Err(err) = self.grow_amortized(self.cap.as_usize(), 1, elem_layout) {
handle_error(err);
}
}
@@ -627,7 +633,7 @@ impl<A: Allocator> RawVecInner<A> {
// the size requested. If that ever changes, the capacity here should
// change to `ptr.len() / mem::size_of::<T>()`.
self.ptr = Unique::from(ptr.cast());
self.cap = unsafe { Cap(cap) };
self.cap = Cap(unsafe { UsizeNoHighBit::new_unchecked(cap) });
}

fn grow_amortized(
@@ -650,7 +656,7 @@ impl<A: Allocator> RawVecInner<A> {

// This guarantees exponential growth. The doubling cannot overflow
// because `cap <= isize::MAX` and the type of `cap` is `usize`.
let cap = cmp::max(self.cap.0 * 2, required_cap);
let cap = cmp::max(self.cap.as_usize() * 2, required_cap);
let cap = cmp::max(min_non_zero_cap(elem_layout.size()), cap);

let new_layout = layout_array(cap, elem_layout)?;
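
A side note on the comment in `grow_amortized`: because the capacity niche bounds `cap` by `isize::MAX`, doubling it in `usize` arithmetic cannot wrap on any pointer width. A quick stand-alone check of that arithmetic, using plain `usize` values rather than the internal `Cap` type:

```rust
fn main() {
    // Worst case allowed by the UsizeNoHighBit niche: isize::MAX.
    let cap = isize::MAX as usize;             // 2^(N-1) - 1 for an N-bit usize
    let doubled = cap.checked_mul(2).unwrap(); // 2^N - 2, which still fits
    assert_eq!(doubled, usize::MAX - 1);       // so `cap * 2` never overflows
}
```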
4 changes: 4 additions & 0 deletions library/core/src/num/mod.rs
@@ -51,6 +51,10 @@ mod overflow_panic;
mod saturating;
mod wrapping;

/// 100% perma-unstable
#[doc(hidden)]
pub mod niche_types;

#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(no_fp_fmt_parse))]
pub use dec2flt::ParseFloatError;
162 changes: 162 additions & 0 deletions library/core/src/num/niche_types.rs
@@ -0,0 +1,162 @@
#![unstable(
feature = "temporary_niche_types",
issue = "none",
reason = "for core, alloc, and std internals until pattern types are further along"
)]

use crate::cmp::Ordering;
use crate::fmt;
use crate::hash::{Hash, Hasher};
use crate::marker::StructuralPartialEq;

macro_rules! define_valid_range_type {
($(
$(#[$m:meta])*
$vis:vis struct $name:ident($int:ident as $uint:ident in $low:literal..=$high:literal);
)+) => {$(
#[derive(Clone, Copy, Eq)]
#[repr(transparent)]
#[rustc_layout_scalar_valid_range_start($low)]
#[rustc_layout_scalar_valid_range_end($high)]
$(#[$m])*
$vis struct $name($int);

const _: () = {
// With the `valid_range` attributes, it's always specified as unsigned
assert!(<$uint>::MIN == 0);
let ulow: $uint = $low;
let uhigh: $uint = $high;
assert!(ulow <= uhigh);

assert!(size_of::<$int>() == size_of::<$uint>());
};

impl $name {
#[inline]
pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: same precondition
unsafe { $name(val) }
}

#[inline]
pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
}

// This is required to allow matching a constant. We don't get it from a derive
// because the derived `PartialEq` would do a field projection, which is banned
// by <https://github.com/rust-lang/compiler-team/issues/807>.
impl StructuralPartialEq for $name {}

impl PartialEq for $name {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
}

impl Ord for $name {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
}

impl PartialOrd for $name {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
}

impl Hash for $name {
// Required method
fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
}

impl fmt::Debug for $name {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
}
)+};
}

define_valid_range_type! {
pub struct Nanoseconds(u32 as u32 in 0..=999_999_999);
}

impl Nanoseconds {
// SAFETY: 0 is within the valid range
pub const ZERO: Self = unsafe { Nanoseconds::new_unchecked(0) };
}

impl Default for Nanoseconds {
#[inline]
fn default() -> Self {
Self::ZERO
}
}

define_valid_range_type! {
pub struct NonZeroU8Inner(u8 as u8 in 1..=0xff);
pub struct NonZeroU16Inner(u16 as u16 in 1..=0xff_ff);
pub struct NonZeroU32Inner(u32 as u32 in 1..=0xffff_ffff);
pub struct NonZeroU64Inner(u64 as u64 in 1..=0xffffffff_ffffffff);
pub struct NonZeroU128Inner(u128 as u128 in 1..=0xffffffffffffffff_ffffffffffffffff);

pub struct NonZeroI8Inner(i8 as u8 in 1..=0xff);
pub struct NonZeroI16Inner(i16 as u16 in 1..=0xff_ff);
pub struct NonZeroI32Inner(i32 as u32 in 1..=0xffff_ffff);
pub struct NonZeroI64Inner(i64 as u64 in 1..=0xffffffff_ffffffff);
pub struct NonZeroI128Inner(i128 as u128 in 1..=0xffffffffffffffff_ffffffffffffffff);
}

#[cfg(target_pointer_width = "16")]
define_valid_range_type! {
pub struct UsizeNoHighBit(usize as usize in 0..=0x7fff);
pub struct NonZeroUsizeInner(usize as usize in 1..=0xffff);
pub struct NonZeroIsizeInner(isize as usize in 1..=0xffff);
}
#[cfg(target_pointer_width = "32")]
define_valid_range_type! {
pub struct UsizeNoHighBit(usize as usize in 0..=0x7fff_ffff);
pub struct NonZeroUsizeInner(usize as usize in 1..=0xffff_ffff);
pub struct NonZeroIsizeInner(isize as usize in 1..=0xffff_ffff);
}
#[cfg(target_pointer_width = "64")]
define_valid_range_type! {
pub struct UsizeNoHighBit(usize as usize in 0..=0x7fff_ffff_ffff_ffff);
pub struct NonZeroUsizeInner(usize as usize in 1..=0xffff_ffff_ffff_ffff);
pub struct NonZeroIsizeInner(isize as usize in 1..=0xffff_ffff_ffff_ffff);
}

define_valid_range_type! {
pub struct U32NotAllOnes(u32 as u32 in 0..=0xffff_fffe);
pub struct I32NotAllOnes(i32 as u32 in 0..=0xffff_fffe);

pub struct U64NotAllOnes(u64 as u64 in 0..=0xffff_ffff_ffff_fffe);
pub struct I64NotAllOnes(i64 as u64 in 0..=0xffff_ffff_ffff_fffe);
}

pub trait NotAllOnesHelper {
type Type;
}
pub type NotAllOnes<T> = <T as NotAllOnesHelper>::Type;
impl NotAllOnesHelper for u32 {
type Type = U32NotAllOnes;
}
impl NotAllOnesHelper for i32 {
type Type = I32NotAllOnes;
}
impl NotAllOnesHelper for u64 {
type Type = U64NotAllOnes;
}
impl NotAllOnesHelper for i64 {
type Type = I64NotAllOnes;
}
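
The generated wrappers are deliberately minimal: `new_unchecked` is the only constructor, `as_inner` reads the value through `transmute` rather than `.0` (field projection on scalar-valid-range types is what MCP#807 restricts), and the hand-written `PartialEq` plus the `StructuralPartialEq` marker keep constants of these types usable in `match` patterns. A rough stand-alone sketch of the shape one macro expansion takes, minus the perma-unstable layout attributes and `StructuralPartialEq` (the `Percent` type is hypothetical, for illustration only):

```rust
// Hypothetical analogue of one `define_valid_range_type!` expansion; the valid
// range (0..=100) is enforced only by the unsafe constructor's contract here.
#[derive(Clone, Copy, Eq)]
#[repr(transparent)]
struct Percent(u8);

impl Percent {
    const ZERO: Self = unsafe { Percent::new_unchecked(0) };

    /// # Safety: `val` must be in `0..=100`.
    const unsafe fn new_unchecked(val: u8) -> Self {
        Percent(val)
    }

    const fn as_inner(self) -> u8 {
        self.0 // the real macro transmutes instead, to avoid a field projection
    }
}

// Manual PartialEq going through the getter, mirroring the macro's impl.
impl PartialEq for Percent {
    fn eq(&self, other: &Self) -> bool {
        self.as_inner() == other.as_inner()
    }
}

fn main() {
    let p = unsafe { Percent::new_unchecked(42) };
    assert_eq!(p.as_inner(), 42);
    assert!(p != Percent::ZERO);
}
```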
31 changes: 1 addition & 30 deletions library/core/src/num/nonzero.rs
@@ -37,41 +37,12 @@ pub unsafe trait ZeroablePrimitive: Sized + Copy + private::Sealed {
macro_rules! impl_zeroable_primitive {
($($NonZeroInner:ident ( $primitive:ty )),+ $(,)?) => {
mod private {
use super::*;

#[unstable(
feature = "nonzero_internals",
reason = "implementation detail which may disappear or be replaced at any time",
issue = "none"
)]
pub trait Sealed {}

$(
// This inner type is never shown directly, so intentionally does not have Debug
#[expect(missing_debug_implementations)]
// Since this struct is non-generic and derives Copy,
// the derived Clone is `*self` and thus doesn't field-project.
#[derive(Clone, Copy)]
#[repr(transparent)]
#[rustc_layout_scalar_valid_range_start(1)]
#[rustc_nonnull_optimization_guaranteed]
[Review comment from a Contributor on the line above] I didn't see this attribute in the new code for the non zero integers.

#[unstable(
feature = "nonzero_internals",
reason = "implementation detail which may disappear or be replaced at any time",
issue = "none"
)]
pub struct $NonZeroInner($primitive);

// This is required to allow matching a constant. We don't get it from a derive
// because the derived `PartialEq` would do a field projection, which is banned
// by <https://github.com/rust-lang/compiler-team/issues/807>.
#[unstable(
feature = "nonzero_internals",
reason = "implementation detail which may disappear or be replaced at any time",
issue = "none"
)]
impl StructuralPartialEq for $NonZeroInner {}
)+
}

$(
@@ -88,7 +59,7 @@ macro_rules! impl_zeroable_primitive {
issue = "none"
)]
unsafe impl ZeroablePrimitive for $primitive {
type NonZeroInner = private::$NonZeroInner;
type NonZeroInner = super::niche_types::$NonZeroInner;
}
)+
};
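
With the inner types moved to `core::num::niche_types`, `ZeroablePrimitive` now points each primitive at the shared wrapper rather than a per-macro private struct; the public `NonZero<T>` API and its niche are unchanged. A hedged stable-Rust sanity check of that unchanged behaviour (not part of the diff):

```rust
use std::mem::size_of;
use std::num::NonZero;

fn main() {
    // NonZero<u32> is backed by a 1..=u32::MAX inner type, so the zero bit
    // pattern remains free for Option's None.
    assert_eq!(size_of::<Option<NonZero<u32>>>(), size_of::<u32>());
    assert_eq!(NonZero::new(5u32).unwrap().get(), 5);
}
```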