Skip to content

Inspect enum discriminant *after* calling its destructor #24765

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 6 commits into from
Apr 27, 2015
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/librustc_trans/trans/_match.rs
Original file line number Diff line number Diff line change
@@ -916,8 +916,8 @@ fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,

let datum = Datum::new(llval, binding_info.ty, Lvalue);
if let Some(cs) = cs {
bcx.fcx.schedule_drop_and_zero_mem(cs, llval, binding_info.ty);
bcx.fcx.schedule_lifetime_end(cs, binding_info.llmatch);
bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty);
}

debug!("binding {} to {}", binding_info.id, bcx.val_to_string(llval));
3 changes: 2 additions & 1 deletion src/librustc_trans/trans/adt.rs
Original file line number Diff line number Diff line change
@@ -769,6 +769,7 @@ pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
(_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None)))
}
Univariant(..) => {
// N.B.: Univariant means <= 1 enum variants (*not* == 1 variants).
(_match::Single, None)
}
}
@@ -1060,7 +1061,7 @@ pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, r: &Repr<'tcx
));
bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
let ptr = struct_field_ptr(variant_cx, st, value, (st.fields.len() - 1), false);
datum::Datum::new(ptr, ptr_ty, datum::Rvalue::new(datum::ByRef))
datum::Datum::new(ptr, ptr_ty, datum::Lvalue)
.store_to(variant_cx, scratch.val)
});
let expr_datum = scratch.to_expr_datum();
7 changes: 5 additions & 2 deletions src/librustc_trans/trans/base.rs
Original file line number Diff line number Diff line change
@@ -471,8 +471,11 @@ pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,

match adt::trans_switch(cx, &*repr, av) {
(_match::Single, None) => {
cx = iter_variant(cx, &*repr, av, &*(*variants)[0],
substs, &mut f);
if n_variants != 0 {
assert!(n_variants == 1);
cx = iter_variant(cx, &*repr, av, &*(*variants)[0],
substs, &mut f);
}
}
(_match::Switch, Some(lldiscrim_a)) => {
cx = f(cx, lldiscrim_a, cx.tcx().types.isize);
84 changes: 67 additions & 17 deletions src/librustc_trans/trans/cleanup.rs
Original file line number Diff line number Diff line change
@@ -393,19 +393,22 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
zero: false
fill_on_drop: false,
skip_dtor: false,
};

debug!("schedule_drop_mem({:?}, val={}, ty={})",
debug!("schedule_drop_mem({:?}, val={}, ty={}) fill_on_drop={} skip_dtor={}",
cleanup_scope,
self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()));
ty.repr(self.ccx.tcx()),
drop.fill_on_drop,
drop.skip_dtor);

self.schedule_clean(cleanup_scope, drop as CleanupObj);
}

/// Schedules a (deep) drop and zero-ing of `val`, which is a pointer to an instance of `ty`
fn schedule_drop_and_zero_mem(&self,
/// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty`
fn schedule_drop_and_fill_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>) {
@@ -416,14 +419,48 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
zero: true
fill_on_drop: true,
skip_dtor: false,
};

debug!("schedule_drop_and_fill_mem({:?}, val={}, ty={}, fill_on_drop={}, skip_dtor={})",
cleanup_scope,
self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()),
drop.fill_on_drop,
drop.skip_dtor);

self.schedule_clean(cleanup_scope, drop as CleanupObj);
}

/// Issue #23611: Schedules a (deep) drop of the contents of
/// `val`, which is a pointer to an instance of struct/enum type
/// `ty`. The scheduled code handles extracting the discriminant
/// and dropping the contents associated with that variant
/// *without* executing any associated drop implementation.
fn schedule_drop_enum_contents(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>) {
// `if` below could be "!contents_needs_drop"; skipping drop
// is just an optimization, so sound to be conservative.
if !self.type_needs_drop(ty) { return; }

let drop = box DropValue {
is_immediate: false,
must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
fill_on_drop: false,
skip_dtor: true,
};

debug!("schedule_drop_and_zero_mem({:?}, val={}, ty={}, zero={})",
debug!("schedule_drop_enum_contents({:?}, val={}, ty={}) fill_on_drop={} skip_dtor={}",
cleanup_scope,
self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()),
true);
drop.fill_on_drop,
drop.skip_dtor);

self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
@@ -440,13 +477,16 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
zero: false
fill_on_drop: false,
skip_dtor: false,
};

debug!("schedule_drop_immediate({:?}, val={}, ty={:?})",
debug!("schedule_drop_immediate({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
cleanup_scope,
self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()));
ty.repr(self.ccx.tcx()),
drop.fill_on_drop,
drop.skip_dtor);

self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
@@ -987,7 +1027,8 @@ pub struct DropValue<'tcx> {
must_unwind: bool,
val: ValueRef,
ty: Ty<'tcx>,
zero: bool
fill_on_drop: bool,
skip_dtor: bool,
}

impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
@@ -1007,13 +1048,18 @@ impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
let _icx = base::push_ctxt("<DropValue as Cleanup>::trans");
let skip_dtor = self.skip_dtor;
let _icx = if skip_dtor {
base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=true")
} else {
base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=false")
};
let bcx = if self.is_immediate {
glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
} else {
glue::drop_ty(bcx, self.val, self.ty, debug_loc)
glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
};
if self.zero {
if self.fill_on_drop {
base::drop_done_fill_mem(bcx, self.val, self.ty);
}
bcx
@@ -1190,10 +1236,14 @@ pub trait CleanupMethods<'blk, 'tcx> {
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>);
fn schedule_drop_and_zero_mem(&self,
fn schedule_drop_and_fill_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>);
fn schedule_drop_enum_contents(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>);
fn schedule_drop_immediate(&self,
cleanup_scope: ScopeId,
val: ValueRef,
9 changes: 5 additions & 4 deletions src/librustc_trans/trans/context.rs
Original file line number Diff line number Diff line change
@@ -21,6 +21,7 @@ use trans::builder::Builder;
use trans::common::{ExternMap,BuilderRef_res};
use trans::debuginfo;
use trans::declare;
use trans::glue::DropGlueKind;
use trans::monomorphize::MonoId;
use trans::type_::{Type, TypeNames};
use middle::subst::Substs;
@@ -73,7 +74,7 @@ pub struct SharedCrateContext<'tcx> {
check_drop_flag_for_sanity: bool,

available_monomorphizations: RefCell<FnvHashSet<String>>,
available_drop_glues: RefCell<FnvHashMap<Ty<'tcx>, String>>,
available_drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, String>>,
}

/// The local portion of a `CrateContext`. There is one `LocalCrateContext`
@@ -89,7 +90,7 @@ pub struct LocalCrateContext<'tcx> {
item_vals: RefCell<NodeMap<ValueRef>>,
needs_unwind_cleanup_cache: RefCell<FnvHashMap<Ty<'tcx>, bool>>,
fn_pointer_shims: RefCell<FnvHashMap<Ty<'tcx>, ValueRef>>,
drop_glues: RefCell<FnvHashMap<Ty<'tcx>, ValueRef>>,
drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, ValueRef>>,
/// Track mapping of external ids to local items imported for inlining
external: RefCell<DefIdMap<Option<ast::NodeId>>>,
/// Backwards version of the `external` map (inlined items to where they
@@ -574,7 +575,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&self.local.fn_pointer_shims
}

pub fn drop_glues<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, ValueRef>> {
pub fn drop_glues<'a>(&'a self) -> &'a RefCell<FnvHashMap<DropGlueKind<'tcx>, ValueRef>> {
&self.local.drop_glues
}

@@ -660,7 +661,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&self.shared.available_monomorphizations
}

pub fn available_drop_glues<'a>(&'a self) -> &'a RefCell<FnvHashMap<Ty<'tcx>, String>> {
pub fn available_drop_glues(&self) -> &RefCell<FnvHashMap<DropGlueKind<'tcx>, String>> {
&self.shared.available_drop_glues
}

193 changes: 102 additions & 91 deletions src/librustc_trans/trans/glue.rs
Original file line number Diff line number Diff line change
@@ -30,7 +30,6 @@ use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::datum;
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::expr;
@@ -132,14 +131,26 @@ pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
drop_ty_core(bcx, v, t, debug_loc, false)
}

pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool) -> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
debug!("drop_ty(t={})", t.repr(bcx.tcx()));
debug!("drop_ty_core(t={}, skip_dtor={})", t.repr(bcx.tcx()), skip_dtor);
let _icx = push_ctxt("drop_ty");
if bcx.fcx.type_needs_drop(t) {
let ccx = bcx.ccx();
let glue = get_drop_glue(ccx, t);
let g = if skip_dtor {
DropGlueKind::TyContents(t)
} else {
DropGlueKind::Ty(t)
};
let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx, t);
let ptr = if glue_type != t {
PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
@@ -155,22 +166,64 @@ pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc)
debug_loc: DebugLoc,
skip_dtor: bool)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("drop_ty_immediate");
let vp = alloca(bcx, type_of(bcx.ccx(), t), "");
store_ty(bcx, v, vp, t);
drop_ty(bcx, vp, t, debug_loc)
drop_ty_core(bcx, vp, t, debug_loc, skip_dtor)
}

pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
debug!("make drop glue for {}", ppaux::ty_to_string(ccx.tcx(), t));
let t = get_drop_glue_type(ccx, t);
debug!("drop glue type {}", ppaux::ty_to_string(ccx.tcx(), t));
match ccx.drop_glues().borrow().get(&t) {
get_drop_glue_core(ccx, DropGlueKind::Ty(t))
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum DropGlueKind<'tcx> {
/// The normal path; runs the dtor, and then recurs on the contents
Ty(Ty<'tcx>),
/// Skips the dtor, if any, for ty; drops the contents directly.
/// Note that the dtor is only skipped at the most *shallow*
/// level, namely, an `impl Drop for Ty` itself. So, for example,
/// if Ty is Newtype(S) then only the Drop impl for Newtype
/// itself will be skipped, while the Drop impl for S, if any,
/// will be invoked.
TyContents(Ty<'tcx>),
}

impl<'tcx> DropGlueKind<'tcx> {
fn ty(&self) -> Ty<'tcx> {
match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
}

fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
{
match *self {
DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
}
}

fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String {
let t_str = ppaux::ty_to_string(ccx.tcx(), self.ty());
match *self {
DropGlueKind::Ty(_) => format!("DropGlueKind::Ty({})", t_str),
DropGlueKind::TyContents(_) => format!("DropGlueKind::TyContents({})", t_str),
}
}
}

fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
g: DropGlueKind<'tcx>) -> ValueRef {
debug!("make drop glue for {}", g.to_string(ccx));
let g = g.map_ty(|t| get_drop_glue_type(ccx, t));
debug!("drop glue type {}", g.to_string(ccx));
match ccx.drop_glues().borrow().get(&g) {
Some(&glue) => return glue,
_ => { }
}
let t = g.ty();

let llty = if type_is_sized(ccx.tcx(), t) {
type_of(ccx, t).ptr_to()
@@ -182,17 +235,17 @@ pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Val

// To avoid infinite recursion, don't `make_drop_glue` until after we've
// added the entry to the `drop_glues` cache.
if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&t) {
if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) {
let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ty::mk_nil(ccx.tcx()));
ccx.drop_glues().borrow_mut().insert(t, llfn);
ccx.drop_glues().borrow_mut().insert(g, llfn);
return llfn;
};

let fn_nm = mangle_internal_name_by_type_and_seq(ccx, t, "drop");
let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ty::mk_nil(ccx.tcx())).unwrap_or_else(||{
ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm));
});
ccx.available_drop_glues().borrow_mut().insert(t, fn_nm);
ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);

let _s = StatRecorder::new(ccx, format!("drop {}", ty_to_short_str(ccx.tcx(), t)));

@@ -217,7 +270,7 @@ pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Val
// type, so we don't need to explicitly cast the function parameter.

let llrawptr0 = get_param(llfn, fcx.arg_pos(0) as c_uint);
let bcx = make_drop_glue(bcx, llrawptr0, t);
let bcx = make_drop_glue(bcx, llrawptr0, g);
finish_fn(&fcx, bcx, ty::FnConverging(ty::mk_nil(ccx.tcx())), DebugLoc::None);

llfn
@@ -307,88 +360,36 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
substs: &subst::Substs<'tcx>)
-> Block<'blk, 'tcx>
{
let repr = adt::represent_type(bcx.ccx(), t);
debug!("trans_struct_drop t: {}", bcx.ty_to_string(t));

// Find and call the actual destructor
let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, t,
class_did, substs);
let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, t, class_did, substs);

// The first argument is the "self" argument for drop
// Class dtors have no explicit args, so the params should
// just consist of the environment (self).
let params = unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(dtor_addr));
ty.element_type().func_params()
};
assert_eq!(params.len(), 1);

let fty = ty::lookup_item_type(bcx.tcx(), dtor_did).ty.subst(bcx.tcx(), substs);
let self_ty = match fty.sty {
ty::ty_bare_fn(_, ref f) => {
let sig = ty::erase_late_bound_regions(bcx.tcx(), &f.sig);
assert!(sig.inputs.len() == 1);
sig.inputs[0]
}
_ => bcx.sess().bug(&format!("Expected function type, found {}",
bcx.ty_to_string(fty)))
};

let (struct_data, info) = if type_is_sized(bcx.tcx(), t) {
(v0, None)
} else {
let data = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
let info = GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]);
(Load(bcx, data), Some(Load(bcx, info)))
};

adt::fold_variants(bcx, &*repr, struct_data, |variant_cx, st, value| {
// Be sure to put all of the fields into a scope so we can use an invoke
// instruction to call the user destructor but still call the field
// destructors if the user destructor panics.
let field_scope = variant_cx.fcx.push_custom_cleanup_scope();

// Class dtors have no explicit args, so the params should
// just consist of the environment (self).
assert_eq!(params.len(), 1);
let self_arg = if type_is_fat_ptr(bcx.tcx(), self_ty) {
// The dtor expects a fat pointer, so make one, even if we have to fake it.
let scratch = datum::rvalue_scratch_datum(bcx, t, "__fat_ptr_drop_self");
Store(bcx, value, GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_ADDR]));
Store(bcx,
// If we just had a thin pointer, make a fat pointer by sticking
// null where we put the unsizing info. This works because t
// is a sized type, so we will only unpack the fat pointer, never
// use the fake info.
info.unwrap_or(C_null(Type::i8p(bcx.ccx()))),
GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_EXTRA]));
PointerCast(variant_cx, scratch.val, params[0])
} else {
PointerCast(variant_cx, value, params[0])
};
let args = vec!(self_arg);
// Be sure to put the contents into a scope so we can use an invoke
// instruction to call the user destructor but still call the field
// destructors if the user destructor panics.
//
// FIXME (#14875) panic-in-drop semantics might be unsupported; we
// might well consider changing below to more direct code.
let contents_scope = bcx.fcx.push_custom_cleanup_scope();

// Add all the fields as a value which needs to be cleaned at the end of
// this scope. Iterate in reverse order so a Drop impl doesn't reverse
// the order in which fields get dropped.
for (i, &ty) in st.fields.iter().enumerate().rev() {
let llfld_a = adt::struct_field_ptr(variant_cx, &*st, value, i, false);
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
bcx.fcx.schedule_drop_enum_contents(cleanup::CustomScope(contents_scope), v0, t);

let val = if type_is_sized(bcx.tcx(), ty) {
llfld_a
} else {
let scratch = datum::rvalue_scratch_datum(bcx, ty, "__fat_ptr_drop_field");
Store(bcx, llfld_a, GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_ADDR]));
Store(bcx, info.unwrap(), GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_EXTRA]));
scratch.val
};
variant_cx.fcx.schedule_drop_mem(cleanup::CustomScope(field_scope), val, ty);
}

let dtor_ty = ty::mk_ctor_fn(bcx.tcx(),
class_did,
&[get_drop_glue_type(bcx.ccx(), t)],
ty::mk_nil(bcx.tcx()));
let (_, variant_cx) = invoke(variant_cx, dtor_addr, &args[..], dtor_ty, DebugLoc::None);
let glue_type = get_drop_glue_type(bcx.ccx(), t);
let dtor_ty = ty::mk_ctor_fn(bcx.tcx(), class_did, &[glue_type], ty::mk_nil(bcx.tcx()));
let (_, bcx) = invoke(bcx, dtor_addr, &[v0], dtor_ty, DebugLoc::None);

variant_cx.fcx.pop_and_trans_custom_cleanup_scope(variant_cx, field_scope)
})
bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
}

fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef)
@@ -454,8 +455,10 @@ fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info:
}
}

fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>)
fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>)
-> Block<'blk, 'tcx> {
let t = g.ty();
let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
// NB: v0 is an *alias* of type t here, not a direct value.
let _icx = push_ctxt("make_drop_glue");

@@ -469,6 +472,10 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>)

match t.sty {
ty::ty_uniq(content_ty) => {
// Support for ty_uniq is built-in and its drop glue is
// special. It may move to library and have Drop impl. As
// a safe-guard, assert ty_uniq not used with TyContents.
assert!(!skip_dtor);
if !type_is_sized(bcx.tcx(), content_ty) {
let llval = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
let llbox = Load(bcx, llval);
@@ -505,8 +512,8 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>)
}
ty::ty_struct(did, substs) | ty::ty_enum(did, substs) => {
let tcx = bcx.tcx();
match ty::ty_dtor(tcx, did) {
ty::TraitDtor(dtor, true) => {
match (ty::ty_dtor(tcx, did), skip_dtor) {
(ty::TraitDtor(dtor, true), false) => {
// FIXME(16758) Since the struct is unsized, it is hard to
// find the drop flag (which is at the end of the struct).
// Lets just ignore the flag and pretend everything will be
@@ -523,16 +530,20 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>)
trans_struct_drop(bcx, t, v0, dtor, did, substs)
}
}
ty::TraitDtor(dtor, false) => {
(ty::TraitDtor(dtor, false), false) => {
trans_struct_drop(bcx, t, v0, dtor, did, substs)
}
ty::NoDtor => {
(ty::NoDtor, _) | (_, true) => {
// No dtor? Just the default case
iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
}
}
}
ty::ty_trait(..) => {
// No support in vtable for distinguishing destroying with
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
assert!(!skip_dtor);
let data_ptr = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
let vtable_ptr = Load(bcx, GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]));
let dtor = Load(bcx, vtable_ptr);
265 changes: 265 additions & 0 deletions src/test/run-pass/issue-23611-enum-swap-in-drop.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,265 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// Issue 23611: this test is ensuring that, for an instance `X` of the
// enum `E`, if you swap in a different variant during the execution
// of the `<E as Drop>::drop`, then the appropriate substructure will
// be torn down after the `<E as Drop>::drop` method returns.

use std::cell::RefCell;
use std::mem;

use self::d::D;

pub fn main() {
    let log = RefCell::new(Vec::new());
    d::println("created empty log");
    test(&log);

    // Each dropped `d::D` pushes its uid onto the log; the interleaved
    // comments reproduce the indented console trace that yields these uids.
    let expected: &[u32] = &[
        // created empty log
        // +-- Make D(test_1, 10000000)
        // |   +-- Make D(g_b_5, 50000001)
        // |   |   in g_B(b2b0) from E::drop, b=b4b0
        // |   +-- Drop D(g_b_5, 50000001)
        50000001,
        // |
        // |   +-- Make D(drop_6, 60000002)
        // |   |   +-- Make D(g_b_5, 50000003)
        // |   |   |   in g_B(b2b0) from E::drop, b=b4b1
        // |   |   +-- Drop D(g_b_5, 50000003)
        50000003,
        // |   |
        // |   |   +-- Make D(GaspB::drop_3, 30000004)
        // |   |   |   +-- Make D(g_b_5, 50000005)
        // |   |   |   |   in g_B(b4b2) from GaspB::drop
        // |   |   |   +-- Drop D(g_b_5, 50000005)
        50000005,
        // |   |   |
        // |   |   +-- Drop D(GaspB::drop_3, 30000004)
        30000004,
        // |   |
        // +-- Drop D(test_1, 10000000)
        10000000,
        // |
        // +-- Make D(GaspA::drop_2, 20000006)
        // |   |   +-- Make D(f_a_4, 40000007)
        // |   |   |   in f_A(a3a0) from GaspA::drop
        // |   |   +-- Drop D(f_a_4, 40000007)
        40000007,
        // |   |
        // +-- Drop D(GaspA::drop_2, 20000006)
        20000006,
        // |
        // +-- Drop D(drop_6, 60000002)
        60000002,
        //
    ];
    assert_eq!(&log.borrow()[..], expected);

    // For reference purposes, the old (incorrect) behavior would produce the
    // following output, which you can compare to the above:
    //
    // created empty log
    // +-- Make D(test_1, 10000000)
    // |   +-- Make D(g_b_5, 50000001)
    // |   |   in g_B(b2b0) from E::drop, b=b4b0
    // |   +-- Drop D(g_b_5, 50000001)
    // |
    // |   +-- Make D(drop_6, 60000002)
    // |   |   +-- Make D(g_b_5, 50000003)
    // |   |   |   in g_B(b2b0) from E::drop, b=b4b1
    // |   |   +-- Drop D(g_b_5, 50000003)
    // |   |
    // |   |   +-- Make D(GaspB::drop_3, 30000004)
    // |   |   |   +-- Make D(g_b_5, 50000005)
    // |   |   |   |   in g_B(b4b2) from GaspB::drop
    // |   |   |   +-- Drop D(g_b_5, 50000005)
    // |   |   |
    // |   |   +-- Drop D(GaspB::drop_3, 30000004)
    // |   |
    // +-- Drop D(test_1, 10000000)
    // |
    // +-- Make D(GaspB::drop_3, 30000006)
    // |   |   +-- Make D(f_a_4, 40000007)
    // |   |   |   in f_A(a3a0) from GaspB::drop
    // |   |   +-- Drop D(f_a_4, 40000007)
    // |   |
    // +-- Drop D(GaspB::drop_3, 30000006)
    // |
    // +-- Drop D(drop_6, 60000002)

    // Note that the old behavior calls f_a from GaspB::drop (and thus creates
    // a D with a uid incorporating the origin of GaspB's drop surrounding the
    // f_a invocation), but the code as written only ever hands f_a off to
    // instances of GaspA. One should therefore be able to prove the invariant
    // that f_a is *only* invoked from an instance of GaspA (either via the
    // GaspA drop implementation or the E drop implementation). Yet the old
    // (bad) behavior allowed a call to f_a to leak in while we are tearing
    // down a value of type GaspB.
}

fn test<'a>(log: d::Log<'a>) {
    // Build an E::B whose payload logs through `log`; dropping it at end of
    // scope triggers the variant swap inside `<E as Drop>::drop`.
    let tracker = D::new("test", 1, log);
    let payload = GaspB(g_b, 0xB4B0, log, tracker);
    let _e = E::B(payload, true);
}

// Payload structs under test: a callback, a numeric payload, the shared log,
// and a D value tracking the payload's lifetime. GaspA and GaspB have the
// same shape but distinct types, so their Drop impls (below) can be told
// apart in the logged trace.
struct GaspA<'a>(for <'b> fn(u32, &'b str, d::Log<'a>), u32, d::Log<'a>, d::D<'a>);
struct GaspB<'a>(for <'b> fn(u32, &'b str, d::Log<'a>), u32, d::Log<'a>, d::D<'a>);

impl<'a> Drop for GaspA<'a> {
    fn drop(&mut self) {
        // Record the dynamic extent of this destructor with a D value, then
        // hand the stored payload to the stored callback.
        let extent = d::D::new("GaspA::drop", 2, self.2);
        let callback = self.0;
        callback(self.1, "GaspA::drop", self.2);
        drop(extent);
    }
}

impl<'a> Drop for GaspB<'a> {
    fn drop(&mut self) {
        // Record the dynamic extent of this destructor with a D value, then
        // hand the stored payload to the stored callback.
        let extent = d::D::new("GaspB::drop", 3, self.2);
        let callback = self.0;
        callback(self.1, "GaspB::drop", self.2);
        drop(extent);
    }
}

// The enum under test. Its Drop impl (below) may swap the active variant
// while `self` is being dropped, so the compiler's post-drop teardown must
// re-inspect the discriminant instead of assuming the original variant.
// The bool is a do-drop flag read by `<E as Drop>::drop`.
enum E<'a> {
A(GaspA<'a>, bool), B(GaspB<'a>, bool),
}

fn f_a(x: u32, ctxt: &str, log: d::Log) {
    // Announce the call, bracketed by a D value recording its extent.
    let _extent = d::D::new("f_a", 4, log);
    let message = format!("in f_A({:x}) from {}", x, ctxt);
    d::println(&message);
}
fn g_b(y: u32, ctxt: &str, log: d::Log) {
    // Announce the call, bracketed by a D value recording its extent.
    let _extent = d::D::new("g_b", 5, log);
    let message = format!("in g_B({:x}) from {}", y, ctxt);
    d::println(&message);
}

// The drop impl under test (issue 23611): while `self` is being dropped it
// may *replace* `self` with a different variant; the substructure torn down
// after this method returns must then be that of the *new* variant.
impl<'a> Drop for E<'a> {
fn drop(&mut self) {
// Invoke the current variant's callback, bump its counter, and take the
// do-drop flag (resetting it to false inside the payload).
let (do_drop, log) = match *self {
E::A(GaspA(ref f, ref mut val_a, log, ref _d_a), ref mut do_drop) => {
f(0xA1A0, &format!("E::drop, a={:x}", val_a), log);
*val_a += 1;
// swap in do_drop := false to avoid infinite dtor regress
(mem::replace(do_drop, false), log)
}
E::B(GaspB(ref g, ref mut val_b, log, ref _d_b), ref mut do_drop) => {
g(0xB2B0, &format!("E::drop, b={:x}", val_b), log);
*val_b += 1;
// swap in do_drop := false to avoid infinite dtor regress
(mem::replace(do_drop, false), log)
}
};

// Swap a fresh E::A into `self`. The replaced value (returned by
// mem::replace) is dropped here; the new E::A contents must be dropped
// after this method returns — the behavior the issue-23611 fix ensures.
if do_drop {
mem::replace(self, E::A(GaspA(f_a, 0xA3A0, log, D::new("drop", 6, log)), true));
}
}
}

// This module provides simultaneous printouts of the dynamic extents
// of all of the D values, in addition to logging the order that each
// is dropped.
//
// This code is similar to a support code module embedded within
// test/run-pass/issue-23338-ensure-param-drop-order.rs; however,
// that (slightly simpler) code only identifies objects in the log via
// (creation) time-stamps; this incorporates both timestamping and the
// point of origin within the source code into the unique ID (uid).

// Default indentation column used by `d::println` (see `d::indent_println`).
const PREF_INDENT: u32 = 20;

pub mod d {
#![allow(unused_parens)]
use std::fmt;
use std::mem;
use std::cell::RefCell;

// Global bookkeeping; `static mut` is used without synchronization —
// presumably the test is single-threaded (TODO confirm). `counter` is a
// monotonically increasing creation timestamp; `trails` is a bitset of the
// indentation "trails" currently owned by live D values.
static mut counter: u16 = 0;
static mut trails: u64 = 0;

// Shared log of dropped-D uids, pushed in drop order (see `Drop for D`).
pub type Log<'a> = &'a RefCell<Vec<u32>>;

// Index just past the highest trail bit currently set.
pub fn current_width() -> u32 {
unsafe { max_width() - trails.leading_zeros() }
}

// Total number of bits available in the `trails` bitset.
pub fn max_width() -> u32 {
unsafe {
(mem::size_of_val(&trails)*8) as u32
}
}

// Prints `s` prefixed with "| " for each live trail bit below `my_trails`
// (blank padding for free bits), visualizing the nesting of live D values.
pub fn indent_println(my_trails: u32, s: &str) {
let mut indent: String = String::new();
for i in 0..my_trails {
unsafe {
if trails & (1 << i) != 0 {
indent = indent + "| ";
} else {
indent = indent + " ";
}
}
}
println!("{}{}", indent, s);
}

// Convenience: print at the fixed default indentation column.
pub fn println(s: &str) {
indent_println(super::PREF_INDENT, s);
}

// Returns the index of the lowest unset bit in `trails`; panics if all
// 64 trails are in use.
fn first_avail() -> u32 {
unsafe {
for i in 0..64 {
if trails & (1 << i) == 0 {
return i;
}
}
}
panic!("exhausted trails");
}

// A value whose construction and destruction are both logged. `uid`
// combines the point-of-origin code (`i` * 10_000_000) with a creation
// timestamp, so each log entry identifies *where* and *when* it was made.
pub struct D<'a> {
name: &'static str, i: u8, uid: u32, trail: u32, log: Log<'a>
}

impl<'a> fmt::Display for D<'a> {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
write!(w, "D({}_{}, {})", self.name, self.i, self.uid)
}
}

impl<'a> D<'a> {
// Claims a free trail bit, computes the uid from `i` and the global
// counter, and prints a "Make" line at the claimed trail's indentation.
pub fn new(name: &'static str, i: u8, log: Log<'a>) -> D<'a> {
unsafe {
let trail = first_avail();
let ctr = ((i as u32) * 10_000_000) + (counter as u32);
counter += 1;
trails |= (1 << trail);
let ret = D {
name: name, i: i, log: log, uid: ctr, trail: trail
};
indent_println(trail, &format!("+-- Make {}", ret));
ret
}
}
}

impl<'a> Drop for D<'a> {
// Releases the trail bit, records the uid in the shared log (this is the
// data the test's assert_eq checks), and prints a "Drop" line.
fn drop(&mut self) {
unsafe { trails &= !(1 << self.trail); };
self.log.borrow_mut().push(self.uid);
indent_println(self.trail, &format!("+-- Drop {}", self));
indent_println(::PREF_INDENT, "");
}
}
}