diff --git a/src/ion/data_structures.rs b/src/ion/data_structures.rs
index 33b7a7bd..e6605206 100644
--- a/src/ion/data_structures.rs
+++ b/src/ion/data_structures.rs
@@ -25,6 +25,7 @@
 use alloc::string::String;
 use alloc::vec::Vec;
 use core::cmp::Ordering;
 use core::fmt::Debug;
+use core::ops::Range;
 use hashbrown::{HashMap, HashSet};
 use smallvec::{smallvec, SmallVec};
@@ -97,7 +98,6 @@ impl core::cmp::Ord for CodeRange {
 define_index!(LiveBundleIndex, LiveBundles, LiveBundle);
 define_index!(LiveRangeIndex, LiveRanges, LiveRange);
 define_index!(SpillSetIndex, SpillSets, SpillSet);
-define_index!(UseIndex);
 define_index!(VRegIndex, VRegs, VRegData);
 define_index!(PRegIndex);
 define_index!(SpillSlotIndex);
@@ -112,7 +112,6 @@ pub struct LiveRangeListEntry {
 }
 
 pub type LiveRangeList = SmallVec<[LiveRangeListEntry; 4]>;
-pub type UseList = SmallVec<[Use; 4]>;
 
 #[derive(Clone, Debug)]
 pub struct LiveRange {
@@ -122,7 +121,7 @@ pub struct LiveRange {
     pub bundle: LiveBundleIndex,
     pub uses_spill_weight_and_flags: u32,
 
-    pub uses: UseList,
+    pub use_range: Range<u32>,
 }
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
@@ -172,6 +171,10 @@ impl LiveRange {
         self.uses_spill_weight_and_flags =
             (self.uses_spill_weight_and_flags & 0xe000_0000) | weight_bits;
     }
+
+    #[inline(always)]
+    pub fn uses(&self) -> Range<usize> {
+        self.use_range.start as usize..self.use_range.end as usize
+    }
 }
 
 #[derive(Clone, Copy, Debug)]
@@ -402,8 +405,7 @@ impl LiveRanges {
             vreg: VRegIndex::invalid(),
             bundle: LiveBundleIndex::invalid(),
             uses_spill_weight_and_flags: 0,
-
-            uses: smallvec![],
+            use_range: 0..0,
         })
     }
 }
@@ -455,6 +457,7 @@ pub struct Env<'a, F: Function> {
     pub blockparam_ins: Vec<BlockparamIn>,
 
     pub ranges: LiveRanges,
+    pub uses: Vec<Use>,
     pub bundles: LiveBundles,
     pub spillsets: SpillSets,
     pub vregs: VRegs,
diff --git a/src/ion/dump.rs b/src/ion/dump.rs
index abcaaf32..d7555539 100644
--- a/src/ion/dump.rs
+++ b/src/ion/dump.rs
@@ -48,7 +48,7 @@ impl<'a, F: Function> Env<'a, F> {
                 r.bundle,
                 r.uses_spill_weight(),
             );
-            for u in &r.uses {
+            for u in &self.uses[r.uses()] {
                 trace!(" * use at {:?} (slot {}): {:?}", u.pos, u.slot, u.operand);
             }
         }
diff --git a/src/ion/liveranges.rs b/src/ion/liveranges.rs
index 09644a24..d6d85bd1 100644
--- a/src/ion/liveranges.rs
+++ b/src/ion/liveranges.rs
@@ -239,7 +239,9 @@ impl<'a, F: Function> Env<'a, F> {
         // because those will be computed during the multi-fixed-reg
         // fixup pass later (after all uses are inserted).
 
-        self.ranges[into].uses.push(u);
+        // We also defer calculating the use range in the uses vector until
+        // that vector is sorted by vreg and position.
+        self.uses.push(u);
 
         // Update stats.
         let range_weight = self.ranges[into].uses_spill_weight() + weight;
@@ -770,15 +772,9 @@ impl<'a, F: Function> Env<'a, F> {
             }
         }
 
-        for range in &mut self.ranges {
-            range.uses.reverse();
-            debug_assert!(range.uses.windows(2).all(|win| win[0].pos <= win[1].pos));
-        }
-
         // Insert safepoint virtual stack uses, if needed.
         for &vreg in self.func.reftype_vregs() {
             let vreg = VRegIndex::new(vreg.vreg());
-            let mut inserted = false;
             let mut safepoint_idx = 0;
             for range_idx in 0..self.vregs[vreg].ranges.len() {
                 let LiveRangeListEntry { range, index } = self.vregs[vreg].ranges[range_idx];
@@ -808,12 +804,6 @@ impl<'a, F: Function> Env<'a, F> {
 
                     self.insert_use_into_liverange(index, Use::new(operand, pos, SLOT_NONE));
                     safepoint_idx += 1;
-
-                    inserted = true;
-                }
-
-                if inserted {
-                    self.ranges[index].uses.sort_unstable_by_key(|u| u.pos);
                 }
 
                 if safepoint_idx >= self.safepoints.len() {
@@ -822,6 +812,27 @@ impl<'a, F: Function> Env<'a, F> {
             }
         }
 
+        // Sort uses by VReg and by position (and by slot to keep the
+        // ordering stable). Then assign ranges of uses to each live range.
+        self.uses
+            .sort_unstable_by_key(|u| (u.operand.vreg(), u.pos, u.slot));
+        for range in &mut self.ranges {
+            let start = self.uses.partition_point(|u| {
+                VRegIndex::new(u.operand.vreg().vreg())
+                    .cmp(&range.vreg)
+                    .then(u.pos.cmp(&range.range.from))
+                    .is_lt()
+            });
+            let end = self.uses.partition_point(|u| {
+                VRegIndex::new(u.operand.vreg().vreg())
+                    .cmp(&range.vreg)
+                    .then(u.pos.cmp(&range.range.to))
+                    .is_lt()
+            });
+            debug_assert!(start <= end);
+            range.use_range = (start as u32)..(end as u32);
+        }
+
         self.blockparam_ins.sort_unstable_by_key(|x| x.key());
         self.blockparam_outs.sort_unstable_by_key(|x| x.key());
 
@@ -849,7 +860,8 @@ impl<'a, F: Function> Env<'a, F> {
             trace!("multi-fixed-reg cleanup: vreg {:?} range {:?}", vreg, range,);
 
             // Find groups of uses that occur in at the same program point.
-            for uses in self.ranges[range].uses.linear_group_by_key_mut(|u| u.pos) {
+            for uses in self.uses[self.ranges[range].uses()].linear_group_by_key_mut(|u| u.pos)
+            {
                 if uses.len() < 2 {
                     continue;
                 }
diff --git a/src/ion/merge.rs b/src/ion/merge.rs
index 09bd2906..69f74123 100644
--- a/src/ion/merge.rs
+++ b/src/ion/merge.rs
@@ -265,7 +265,7 @@ impl<'a, F: Function> Env<'a, F> {
         let mut fixed_def = false;
         let mut stack = false;
         for entry in &self.bundles[bundle].ranges {
-            for u in &self.ranges[entry.index].uses {
+            for u in &self.uses[self.ranges[entry.index].uses()] {
                 if let OperandConstraint::FixedReg(_) = u.operand.constraint() {
                     fixed = true;
                     if u.operand.kind() == OperandKind::Def {
diff --git a/src/ion/mod.rs b/src/ion/mod.rs
index 4d32eb8c..1226d43f 100644
--- a/src/ion/mod.rs
+++ b/src/ion/mod.rs
@@ -59,6 +59,7 @@ impl<'a, F: Function> Env<'a, F> {
             blockparam_ins: vec![],
             bundles: LiveBundles::with_capacity(n),
             ranges: LiveRanges::with_capacity(4 * n),
+            uses: Vec::with_capacity(8 * n),
             spillsets: SpillSets::with_capacity(n),
             vregs: VRegs::with_capacity(n),
             pregs: vec![],
diff --git a/src/ion/moves.rs b/src/ion/moves.rs
index a828ec04..5a9d1bc4 100644
--- a/src/ion/moves.rs
+++ b/src/ion/moves.rs
@@ -556,8 +556,8 @@ impl<'a, F: Function> Env<'a, F> {
                 }
 
                 // Scan over def/uses and apply allocations.
-                for use_idx in 0..self.ranges[entry.index].uses.len() {
-                    let usedata = self.ranges[entry.index].uses[use_idx];
+                for use_idx in self.ranges[entry.index].uses() {
+                    let usedata = self.uses[use_idx];
                     trace!("applying to use: {:?}", usedata);
                     debug_assert!(range.contains_point(usedata.pos));
                     let inst = usedata.pos.inst();
diff --git a/src/ion/process.rs b/src/ion/process.rs
index 371ab84b..84f98d63 100644
--- a/src/ion/process.rs
+++ b/src/ion/process.rs
@@ -15,7 +15,7 @@
 use super::{
     spill_weight_from_constraint, Env, LiveBundleIndex, LiveBundleVec, LiveRangeFlag,
     LiveRangeIndex, LiveRangeKey, LiveRangeList, LiveRangeListEntry, PRegIndex, RegTraversalIter,
-    Requirement, SpillWeight, UseList, VRegIndex,
+    Requirement, SpillWeight, VRegIndex,
 };
 use crate::{
     ion::data_structures::{
@@ -277,7 +277,7 @@ impl<'a, F: Function> Env<'a, F> {
                 minimal = true;
                 fixed = true;
             } else {
-                for u in &first_range_data.uses {
+                for u in &self.uses[first_range_data.uses()] {
                     trace!(" -> use: {:?}", u);
                     if let OperandConstraint::FixedReg(_) = u.operand.constraint() {
                         trace!(" -> fixed operand at {:?}: {:?}", u.pos, u.operand);
@@ -353,13 +353,14 @@ impl<'a, F: Function> Env<'a, F> {
 
     pub fn recompute_range_properties(&mut self, range: LiveRangeIndex) {
         let rangedata = &mut self.ranges[range];
+        let uses = &self.uses[rangedata.uses()];
         let mut w = SpillWeight::zero();
-        for u in &rangedata.uses {
+        for u in uses {
             w = w + SpillWeight::from_bits(u.weight);
             trace!("range{}: use {:?}", range.index(), u);
         }
         rangedata.set_uses_spill_weight(w);
-        if rangedata.uses.len() > 0 && rangedata.uses[0].operand.kind() == OperandKind::Def {
+        if uses.len() > 0 && uses[0].operand.kind() == OperandKind::Def {
             // Note that we *set* the flag here, but we never *clear*
             // it: it may be set by a progmove as well (which does not
             // create an explicit use or def), and we want to preserve
@@ -438,7 +439,7 @@ impl<'a, F: Function> Env<'a, F> {
         // Find any uses; if none, just chop off one instruction.
         let mut first_use = None;
         'outer: for entry in &self.bundles[bundle].ranges {
-            for u in &self.ranges[entry.index].uses {
+            for u in &self.uses[self.ranges[entry.index].uses()] {
                 first_use = Some(u.pos);
                 break 'outer;
             }
@@ -500,7 +501,7 @@ impl<'a, F: Function> Env<'a, F> {
             // When the bundle contains a fixed constraint, we advance the split point to right
             // before the first instruction with a fixed use present.
             if self.bundles[bundle].cached_fixed() {
-                for u in &self.ranges[entry.index].uses {
+                for u in &self.uses[self.ranges[entry.index].uses()] {
                     if u.pos < split_at {
                         continue;
                     }
@@ -558,20 +559,15 @@ impl<'a, F: Function> Env<'a, F> {
             });
             self.ranges[new_lr].vreg = self.ranges[orig_lr].vreg;
             trace!(" -> splitting LR {:?} into {:?}", orig_lr, new_lr);
-            let first_use = self.ranges[orig_lr]
-                .uses
+            let first_use = match self.uses[self.ranges[orig_lr].uses()]
                 .iter()
                 .position(|u| u.pos >= split_at)
-                .unwrap_or(self.ranges[orig_lr].uses.len());
-            let rest_uses: UseList = self.ranges[orig_lr]
-                .uses
-                .iter()
-                .cloned()
-                .skip(first_use)
-                .collect();
-            self.ranges[new_lr].uses = rest_uses;
-            self.ranges[orig_lr].uses.truncate(first_use);
-            self.ranges[orig_lr].uses.shrink_to_fit();
+            {
+                Some(pos) => self.ranges[orig_lr].use_range.start + pos as u32,
+                None => self.ranges[orig_lr].use_range.end,
+            };
+            self.ranges[new_lr].use_range = first_use..self.ranges[orig_lr].use_range.end;
+            self.ranges[orig_lr].use_range.end = first_use;
             self.recompute_range_properties(orig_lr);
             self.recompute_range_properties(new_lr);
             new_lr_list[0].index = new_lr;
@@ -613,7 +609,9 @@ impl<'a, F: Function> Env<'a, F> {
         while let Some(entry) = self.bundles[bundle].ranges.last().cloned() {
             let end = entry.range.to;
             let vreg = self.ranges[entry.index].vreg;
-            let last_use = self.ranges[entry.index].uses.last().map(|u| u.pos);
+            let last_use = self.uses[self.ranges[entry.index].uses()]
+                .last()
+                .map(|u| u.pos);
             if last_use.is_none() {
                 let spill = self
                     .get_or_create_spill_bundle(bundle, /* create_if_absent = */ true)
@@ -673,7 +671,9 @@ impl<'a, F: Function> Env<'a, F> {
             }
             let start = entry.range.from;
             let vreg = self.ranges[entry.index].vreg;
-            let first_use = self.ranges[entry.index].uses.first().map(|u| u.pos);
+            let first_use = self.uses[self.ranges[entry.index].uses()]
+                .first()
+                .map(|u| u.pos);
             if first_use.is_none() {
                 let spill = self
                     .get_or_create_spill_bundle(new_bundle, /* create_if_absent = */ true)
@@ -800,8 +800,6 @@ impl<'a, F: Function> Env<'a, F> {
         let mut last_inst: Option<Inst> = None;
         let mut last_vreg: Option<VRegIndex> = None;
 
-        let mut spill_uses = UseList::new();
-
         for entry in core::mem::take(&mut self.bundles[bundle].ranges) {
             let lr_from = entry.range.from;
             let lr_to = entry.range.to;
@@ -815,7 +813,8 @@ impl<'a, F: Function> Env<'a, F> {
 
             let mut spill_starts_def = false;
             let mut last_live_pos = entry.range.from;
-            for u in core::mem::take(&mut self.ranges[entry.index].uses) {
+            for use_idx in self.ranges[entry.index].uses() {
+                let u = &self.uses[use_idx];
                 trace!(" -> use {:?} (last_live_pos {:?})", u, last_live_pos);
 
                 let is_def = u.operand.kind() == OperandKind::Def;
@@ -825,16 +824,16 @@ impl<'a, F: Function> Env<'a, F> {
                 // any-constrained uses will be easy to satisfy. Solving those constraints earlier
                 // could create unnecessary conflicts with existing bundles that need to fit in a
                 // register, more strict requirements, so we delay them eagerly.
-                if u.operand.constraint() == OperandConstraint::Any {
-                    trace!(" -> migrating this any-constrained use to the spill range");
-                    spill_uses.push(u);
+                // if u.operand.constraint() == OperandConstraint::Any {
+                //     trace!(" -> migrating this any-constrained use to the spill range");
+                //     spill_uses.push(u);
 
-                    // Remember if we're moving the def of this vreg into the spill range, so that
-                    // we can set the appropriate flags on it later.
-                    spill_starts_def = spill_starts_def || is_def;
+                //     // Remember if we're moving the def of this vreg into the spill range, so that
+                //     // we can set the appropriate flags on it later.
+                //     spill_starts_def = spill_starts_def || is_def;
 
-                    continue;
-                }
+                //     continue;
+                // }
 
                 // If this is a def of the vreg the entry cares about, make sure that the spill
                 // range starts right before the next instruction so that the value is available.
@@ -846,7 +845,8 @@ impl<'a, F: Function> Env<'a, F> {
                 // If we just created a LR for this inst at the last
                 // pos, add this use to the same LR.
                 if Some(u.pos.inst()) == last_inst && Some(vreg) == last_vreg {
-                    self.ranges[last_lr.unwrap()].uses.push(u);
+                    debug_assert_eq!(self.ranges[last_lr.unwrap()].use_range.end, use_idx as u32);
+                    self.ranges[last_lr.unwrap()].use_range.end = use_idx as u32 + 1;
                     trace!(" -> appended to last LR {:?}", last_lr.unwrap());
                     continue;
                 }
@@ -864,7 +864,7 @@ impl<'a, F: Function> Env<'a, F> {
                     let cr = CodeRange { from: u.pos, to };
                     let lr = self.ranges.add(cr);
                     new_lrs.push((vreg, lr));
-                    self.ranges[lr].uses.push(u);
+                    self.ranges[lr].use_range = use_idx as u32..use_idx as u32 + 1;
                     self.ranges[lr].vreg = vreg;
 
                     trace!(
@@ -898,7 +898,7 @@ impl<'a, F: Function> Env<'a, F> {
                     let cr = CodeRange { from: pos, to };
                     let lr = self.ranges.add(cr);
                     new_lrs.push((vreg, lr));
-                    self.ranges[lr].uses.push(u);
+                    self.ranges[lr].use_range = use_idx as u32..use_idx as u32 + 1;
                     self.ranges[lr].vreg = vreg;
 
                     // Create a new bundle that contains only this LR.
@@ -938,7 +938,6 @@ impl<'a, F: Function> Env<'a, F> {
                 let spill_lr = self.ranges.add(spill_range);
                 self.ranges[spill_lr].vreg = vreg;
                 self.ranges[spill_lr].bundle = spill;
-                self.ranges[spill_lr].uses.extend(spill_uses.drain(..));
                 new_lrs.push((vreg, spill_lr));
 
                 if spill_starts_def {
@@ -956,8 +955,6 @@ impl<'a, F: Function> Env<'a, F> {
                     spill_lr,
                     spill
                 );
-            } else {
-                assert!(spill_uses.is_empty());
             }
         }
 
diff --git a/src/ion/requirement.rs b/src/ion/requirement.rs
index fc049e20..9c018c3d 100644
--- a/src/ion/requirement.rs
+++ b/src/ion/requirement.rs
@@ -131,7 +131,7 @@ impl<'a, F: Function> Env<'a, F> {
         let ranges = &self.bundles[bundle].ranges;
         for entry in ranges {
             trace!(" -> LR {:?}: {:?}", entry.index, entry.range);
-            for u in &self.ranges[entry.index].uses {
+            for u in &self.uses[self.ranges[entry.index].uses()] {
                 trace!(" -> use {:?}", u);
                 let r = self.requirement_from_operand(u.operand);
                 req = req.merge(r).map_err(|_| {
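
Note: the sketch below is not part of the diff. It is a minimal, self-contained illustration of the indexing scheme the patch adopts: all uses live in one shared vector, sorted once by (vreg, position, slot), and each live range holds a half-open Range<u32> window into it, computed with partition_point. The Use, LiveRange, VRegIndex, and ProgPoint types here are simplified stand-ins for illustration, not regalloc2's real definitions.

    use core::ops::Range;

    // Simplified stand-ins for regalloc2's types (illustrative only).
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
    struct VRegIndex(u32);

    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
    struct ProgPoint(u32);

    #[derive(Clone, Copy, Debug)]
    struct Use {
        vreg: VRegIndex,
        pos: ProgPoint,
        slot: u8,
    }

    struct LiveRange {
        vreg: VRegIndex,
        from: ProgPoint,
        to: ProgPoint,
        // Half-open window into the shared `uses` vector,
        // replacing a per-range use list.
        use_range: Range<u32>,
    }

    impl LiveRange {
        fn uses(&self) -> Range<usize> {
            self.use_range.start as usize..self.use_range.end as usize
        }
    }

    fn assign_use_windows(uses: &mut Vec<Use>, ranges: &mut [LiveRange]) {
        // Sort once, globally: by vreg, then position, then slot
        // (slot only keeps ties deterministic).
        uses.sort_unstable_by_key(|u| (u.vreg, u.pos, u.slot));
        // For each range, binary-search its window with `partition_point`,
        // mirroring the logic the diff adds in liveranges.rs.
        for range in ranges.iter_mut() {
            let start = uses.partition_point(|u| {
                u.vreg.cmp(&range.vreg).then(u.pos.cmp(&range.from)).is_lt()
            });
            let end = uses.partition_point(|u| {
                u.vreg.cmp(&range.vreg).then(u.pos.cmp(&range.to)).is_lt()
            });
            debug_assert!(start <= end);
            range.use_range = start as u32..end as u32;
        }
    }

    fn main() {
        let v0 = VRegIndex(0);
        let v1 = VRegIndex(1);
        let mut uses = vec![
            Use { vreg: v1, pos: ProgPoint(4), slot: 0 },
            Use { vreg: v0, pos: ProgPoint(1), slot: 0 },
            Use { vreg: v0, pos: ProgPoint(3), slot: 1 },
        ];
        let mut ranges = vec![
            LiveRange { vreg: v0, from: ProgPoint(0), to: ProgPoint(4), use_range: 0..0 },
            LiveRange { vreg: v1, from: ProgPoint(2), to: ProgPoint(6), use_range: 0..0 },
        ];
        assign_use_windows(&mut uses, &mut ranges);
        for r in &ranges {
            // Iterating a range's uses is now just a slice of the shared vector.
            println!("{:?} -> {:?}", r.vreg, &uses[r.uses()]);
        }
    }

Compared with a small per-range vector of uses, a single sorted vector plus a u32 window keeps LiveRange small and avoids a separate heap allocation per range, which is presumably the motivation for the change; splitting a live range then only requires splitting the window, as the rewritten code in process.rs shows.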