compiler/rustc_codegen_ssa/src/mir/rvalue.rs (6 additions, 0 deletions)
@@ -24,6 +24,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     ) {
         match *rvalue {
             mir::Rvalue::Use(ref operand) => {
+                if let mir::Operand::Constant(const_op) = operand {
+                    let val = self.eval_mir_constant(&const_op);
+                    if val.all_bytes_uninit(self.cx.tcx()) {
+                        return;
+                    }
+                }
                 let cg_operand = self.codegen_operand(bx, operand);
                 // Crucially, we do *not* use `OperandValue::Ref` for types with
                 // `BackendRepr::Scalar | BackendRepr::ScalarPair`. This ensures we match the MIR
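For context, a minimal sketch (mirroring the fully_uninit test in tests/codegen-llvm/uninit-consts.rs below, not part of this diff) of the source pattern the codegen change targets:

use std::mem::MaybeUninit;

// Every byte of `M` is uninitialized, so copying it into the return place
// carries no data; the early `return` added above lets `codegen_rvalue` skip
// the copy instead of emitting a memcpy from an all-`undef` global.
pub const fn fully_uninit() -> MaybeUninit<[u8; 10]> {
    const M: MaybeUninit<[u8; 10]> = MaybeUninit::uninit();
    M
}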
compiler/rustc_const_eval/src/interpret/operand.rs (4 additions, 0 deletions)
@@ -522,6 +522,10 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
     pub(super) fn op(&self) -> &Operand<Prov> {
         &self.op
     }
+
+    pub fn is_immediate_uninit(&self) -> bool {
+        matches!(self.op, Operand::Immediate(Immediate::Uninit))
+    }
 }

 impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
compiler/rustc_mir_transform/src/gvn.rs (21 additions, 5 deletions)
@@ -570,9 +570,19 @@ impl<'body, 'a, 'tcx> VnState<'body, 'a, 'tcx> {
             _ if ty.is_zst() => ImmTy::uninit(ty).into(),

             Opaque(_) => return None,
-            // Do not bother evaluating repeat expressions. This would uselessly consume memory.
-            Repeat(..) => return None,

+            // In general, evaluating repeat expressions just consumes a lot of memory.
+            // But in the special case that the element is just Immediate::Uninit, we can evaluate
+            // it without extra memory! If we don't propagate uninit values like this, LLVM can get
+            // very confused: https://github.com/rust-lang/rust/issues/139355
+            Repeat(value, _count) => {
+                let value = self.eval_to_const(value)?;
+                if value.is_immediate_uninit() {
+                    ImmTy::uninit(ty).into()
+                } else {
+                    return None;
+                }
+            }
             Constant { ref value, disambiguator: _ } => {
                 self.ecx.eval_mir_constant(value, DUMMY_SP, None).discard_err()?
             }
@@ -608,8 +618,12 @@ impl<'body, 'a, 'tcx> VnState<'body, 'a, 'tcx> {
             }
             Union(active_field, field) => {
                 let field = self.eval_to_const(field)?;
-                if matches!(ty.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..))
-                {
+                if field.layout.layout.is_zst() {
+                    ImmTy::from_immediate(Immediate::Uninit, ty).into()
+                } else if matches!(
+                    ty.backend_repr,
+                    BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)
+                ) {
                     let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?;
                     let field_dest = self.ecx.project_field(&dest, active_field).discard_err()?;
                     self.ecx.copy_op(field, &field_dest).discard_err()?;
@@ -1711,7 +1725,9 @@ fn op_to_prop_const<'tcx>(

     // Do not synthetize too large constants. Codegen will just memcpy them, which we'd like to
     // avoid.
-    if !matches!(op.layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
+    if !op.is_immediate_uninit()
+        && !matches!(op.layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..))
+    {
         return None;
     }

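For context, a hedged sketch (not part of this diff; the function names are illustrative) of the two source shapes the GVN changes handle. The exact MIR depends on inlining and earlier passes, but the intent is that both reduce to a single uninit constant:

use std::mem::MaybeUninit;

// Repeat case: once `MaybeUninit::uninit()` has been reduced to an uninit
// constant by earlier passes, this array literal is a MIR `Repeat` whose
// element is `Immediate::Uninit`, which GVN can now fold without allocating
// 16 KiB of backing memory for the evaluated array.
pub fn huge_uninit_array() -> [MaybeUninit<u32>; 4096] {
    [MaybeUninit::uninit(); 4096]
}

// Union case (as in the mir-opt expected output further down): initializing
// the zero-sized `unit` field leaves all four bytes uninitialized, so the
// whole aggregate is valued as uninit.
#[repr(C)]
pub union Union32 {
    pub value: u32,
    pub unit: (),
}

pub fn uninit_union() -> Union32 {
    Union32 { unit: () }
}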
tests/codegen-llvm/uninit-consts.rs (2 additions, 6 deletions)
@@ -11,21 +11,17 @@ pub struct PartiallyUninit {
     y: MaybeUninit<[u8; 10]>,
 }

-// CHECK: [[FULLY_UNINIT:@.*]] = private unnamed_addr constant [10 x i8] undef
-
 // CHECK: [[PARTIALLY_UNINIT:@.*]] = private unnamed_addr constant <{ [4 x i8], [12 x i8] }> <{ [4 x i8] c"{{\\EF\\BE\\AD\\DE|\\DE\\AD\\BE\\EF}}", [12 x i8] undef }>, align 4

 // This shouldn't contain undef, since it contains more chunks
 // than the default value of uninit_const_chunk_threshold.
 // CHECK: [[UNINIT_PADDING_HUGE:@.*]] = private unnamed_addr constant [32768 x i8] c"{{.+}}", align 4

-// CHECK: [[FULLY_UNINIT_HUGE:@.*]] = private unnamed_addr constant [16384 x i8] undef
-
 // CHECK-LABEL: @fully_uninit
 #[no_mangle]
 pub const fn fully_uninit() -> MaybeUninit<[u8; 10]> {
     const M: MaybeUninit<[u8; 10]> = MaybeUninit::uninit();
-    // CHECK: call void @llvm.memcpy.{{.+}}(ptr align 1 %_0, ptr align 1 {{.*}}[[FULLY_UNINIT]]{{.*}}, i{{(32|64)}} 10, i1 false)
+    // CHECK: ret void
     M
 }

@@ -49,6 +45,6 @@ pub const fn uninit_padding_huge() -> [(u32, u8); 4096] {
 #[no_mangle]
 pub const fn fully_uninit_huge() -> MaybeUninit<[u32; 4096]> {
     const F: MaybeUninit<[u32; 4096]> = MaybeUninit::uninit();
-    // CHECK: call void @llvm.memcpy.{{.+}}(ptr align 4 %_0, ptr align 4 {{.*}}[[FULLY_UNINIT_HUGE]]{{.*}}, i{{(32|64)}} 16384, i1 false)
+    // CHECK: ret void
     F
 }
(mir-opt expected-output file; path not shown)
@@ -15,12 +15,16 @@
+ _1 = const Union32 {{ value: Indirect { alloc_id: ALLOC0, offset: Size(0 bytes) }: u32, unit: () }};
StorageDead(_2);
- _0 = move _1 as u32 (Transmute);
+ _0 = const Indirect { alloc_id: ALLOC0, offset: Size(0 bytes) }: u32;
+ _0 = const Indirect { alloc_id: ALLOC1, offset: Size(0 bytes) }: u32;
StorageDead(_1);
return;
}
+ }
+
+ ALLOC1 (size: 4, align: 4) {
+ __ __ __ __ │ ░░░░
+ }
+
+ ALLOC0 (size: 4, align: 4) {
+ __ __ __ __ │ ░░░░
}
(second mir-opt expected-output file; path not shown)
@@ -15,12 +15,16 @@
+ _1 = const Union32 {{ value: Indirect { alloc_id: ALLOC0, offset: Size(0 bytes) }: u32, unit: () }};
StorageDead(_2);
- _0 = move _1 as u32 (Transmute);
+ _0 = const Indirect { alloc_id: ALLOC0, offset: Size(0 bytes) }: u32;
+ _0 = const Indirect { alloc_id: ALLOC1, offset: Size(0 bytes) }: u32;
StorageDead(_1);
return;
}
+ }
+
+ ALLOC1 (size: 4, align: 4) {
+ __ __ __ __ │ ░░░░
+ }
+
+ ALLOC0 (size: 4, align: 4) {
+ __ __ __ __ │ ░░░░
}