diff --git a/src/incin.rs b/src/incin.rs
index dbc0c83..8ac3a05 100755
--- a/src/incin.rs
+++ b/src/incin.rs
@@ -224,8 +224,8 @@ impl<'incin, T> Drop for Pause<'incin, T> {
     fn drop(&mut self) {
         if self.incin.counter.fetch_sub(1, AcqRel) == 1 {
             // If the previous value was 1, this means now it is 0 and... we can
-            // delete our local list.
-            self.incin.tls_list.get().map(GarbageList::clear);
+            // delete all local lists.
+            self.incin.tls_list.clear2(GarbageList::clear);
         }
     }
 }
diff --git a/src/map/mod.rs b/src/map/mod.rs
index 34edbdf..5d51960 100644
--- a/src/map/mod.rs
+++ b/src/map/mod.rs
@@ -529,6 +529,36 @@ mod test {
         assert_eq!(*guard.val(), 4);
     }
 
+    #[test]
+    fn test_drop() {
+        fn remove(map: Arc<Map<i32, Arc<i32>>>) {
+            let map_cloned = map.clone();
+            // Hold the read guard while another thread removes the entry,
+            // so the removed node is queued as garbage instead of dropped.
+            if let Some(_guard) = map.get(&0) {
+                std::thread::spawn(move || {
+                    assert!(map_cloned.remove(&0).is_some());
+                })
+                .join()
+                .ok();
+            }
+        }
+
+        let map = Arc::new(Map::new());
+        let item = Arc::new(0);
+        map.insert(0, item.clone());
+        remove(map.clone());
+        map.insert(0, item.clone());
+        remove(map.clone());
+        map.insert(0, item.clone());
+        remove(map.clone());
+        map.insert(0, item.clone());
+        remove(map.clone());
+        map.insert(0, item.clone());
+        remove(map.clone());
+        assert_eq!(Arc::strong_count(&item), 1);
+    }
+
     #[test]
     fn create() {
         let map = Map::new();
diff --git a/src/tls/mod.rs b/src/tls/mod.rs
index 945264e..9c80ec5 100644
--- a/src/tls/mod.rs
+++ b/src/tls/mod.rs
@@ -161,6 +161,55 @@ impl<T> ThreadLocal<T> {
         }
     }
 
+    /// Applies `f` to the data of every entry stored in this `ThreadLocal`,
+    /// regardless of which thread created the entry.
+    pub fn clear2(&self, f: fn(&T)) {
+        let table = &*self.top;
+        fn func<Z>(table: &Table<Z>, f: fn(&Z)) {
+            for node in table.nodes.iter() {
+                // Load what is in there.
+                let in_place = node.atomic.load(Acquire);
+
+                // Null means there is nothing.
+                if in_place.is_null() {
+                    continue;
+                }
+
+                // Having in_place's lower bit set to 0 means it is a
+                // pointer to an entry.
+                if in_place as usize & 1 == 0 {
+                    // This is safe since:
+                    //
+                    // 1. We only store a pointer with a cleared lower bit
+                    //    if it points to an entry.
+                    //
+                    // 2. We only delete stuff when we are behind mutable
+                    //    references.
+                    let entry = unsafe { &*(in_place as *mut Entry<Z>) };
+                    f(&entry.data);
+                } else {
+                    // The remaining case (non-null with lower bit set to 1)
+                    // means we have a child table. Clear the pointer's lower
+                    // bit so we can dereference it.
+                    let table_ptr = (in_place as usize & !1) as *mut Table<Z>;
+                    // Recurse into the child table. This is safe since:
+                    //
+                    // 1. We only store a pointer with a marked lower bit if
+                    //    it points to a table.
+                    //
+                    // 2. We cleared the bit above, so we have the original
+                    //    pointer back.
+                    //
+                    // 3. We only delete stuff when we are behind mutable
+                    //    references.
+                    func(unsafe { &*table_ptr }, f);
+                }
+            }
+        }
+
+        func(table, f);
+    }
+
     /// Accesses the entry for the current thread. If necessary, the `init`
     /// closure is called to initialize the entry.
     #[inline]
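
Note on the `src/incin.rs` hunk: the bug this patch fixes is that a thread can queue a removed node on *its own* garbage list and then exit; when the last `Pause` is later dropped on a different thread, the old `tls_list.get().map(GarbageList::clear)` only cleared the dropping thread's list, so the exited thread's garbage was never reclaimed (this is exactly what `test_drop` detects via `Arc::strong_count`). Below is a minimal sketch of the release protocol, using a hypothetical `PauseCounter` stand-in rather than the crate's actual `Incinerator` type:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::AcqRel};

// Hypothetical, stripped-down model of the incinerator's pause counter;
// the real type also owns the `tls_list` of per-thread `GarbageList`s.
struct PauseCounter {
    active: AtomicUsize,
}

impl PauseCounter {
    // Called when a pause guard is dropped.
    fn unpause(&self, clear_all_lists: impl FnOnce()) {
        // `fetch_sub` returns the previous value. If it was 1, this call
        // released the last active pause, so no reader can still hold a
        // node that was queued as garbage. It is then safe to clear
        // *every* thread's garbage list, not only the current thread's.
        if self.active.fetch_sub(1, AcqRel) == 1 {
            clear_all_lists();
        }
    }
}

fn main() {
    let counter = PauseCounter { active: AtomicUsize::new(1) };
    counter.unpause(|| println!("clearing all garbage lists"));
}
```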
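Note on the `src/tls/mod.rs` hunk: `clear2` leans on the module's pointer-tagging invariant, where each table slot is either null, an entry pointer (lower bit clear), or a child-table pointer (lower bit set). The following self-contained sketch shows that same walk with hypothetical, simplified `Entry`/`Table` stand-ins; the crate's real types carry more machinery, so this is an illustration of the technique, not the actual layout:

```rust
use std::marker::PhantomData;
use std::sync::atomic::{AtomicUsize, Ordering::Acquire};

// Hypothetical stand-ins for the crate's `Entry` and `Table`.
struct Entry<T> {
    data: T,
}

struct Table<T> {
    // Each slot holds 0 (empty), a `*mut Entry<T>` with the lower bit
    // clear, or a `*mut Table<T>` with the lower bit set.
    nodes: Vec<AtomicUsize>,
    _marker: PhantomData<T>,
}

// The same dispatch `clear2` performs: apply `f` to untagged entries,
// untag and recurse into child tables.
fn visit<T>(table: &Table<T>, f: fn(&T)) {
    for node in &table.nodes {
        let raw = node.load(Acquire);
        if raw == 0 {
            continue; // empty slot
        }
        if raw & 1 == 0 {
            // Lower bit clear: an entry pointer.
            let entry = unsafe { &*(raw as *const Entry<T>) };
            f(&entry.data);
        } else {
            // Lower bit set: a child table; untag, then recurse.
            let child = unsafe { &*((raw & !1) as *const Table<T>) };
            visit(child, f);
        }
    }
}

fn main() {
    // Build root -> [entry(1), tagged child -> [entry(2)]] by hand.
    let e1 = Box::into_raw(Box::new(Entry { data: 1u32 }));
    let e2 = Box::into_raw(Box::new(Entry { data: 2u32 }));
    let child = Box::into_raw(Box::new(Table {
        nodes: vec![AtomicUsize::new(e2 as usize)],
        _marker: PhantomData,
    }));
    let root = Table {
        nodes: vec![
            AtomicUsize::new(e1 as usize),
            AtomicUsize::new(child as usize | 1), // mark as table
        ],
        _marker: PhantomData,
    };

    visit(&root, |data| println!("{}", data)); // prints 1, then 2

    // Free the demo allocations.
    unsafe {
        drop(Box::from_raw(e1));
        drop(Box::from_raw(e2));
        drop(Box::from_raw(child));
    }
}
```

This works because both `Entry` and `Table` have alignment of at least 2, so real pointers to them always have a zero lower bit, leaving that bit free as a tag.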