diff --git a/bin/otioview.py b/bin/otioview.py
index 2f49eb61d..a7203b475 100755
--- a/bin/otioview.py
+++ b/bin/otioview.py
@@ -28,6 +28,12 @@ def _parsed_args():
     return parser.parse_args()
 
 
+class TimelineWidgetItem(QtGui.QListWidgetItem):
+    def __init__(self, timeline, *args, **kwargs):
+        super(TimelineWidgetItem, self).__init__(*args, **kwargs)
+        self.timeline = timeline
+
+
 class Main(QtGui.QMainWindow):
 
     def __init__(self, *args, **kwargs):
@@ -40,19 +46,24 @@ def __init__(self, *args, **kwargs):
         self.resize(900, 500)
 
         # widgets
+        self.sequences = QtGui.QListWidget(parent=self)
         self.timeline = otioViewWidget.timeline.Timeline(parent=self)
         self.details = otioViewWidget.details.Details(parent=self)
 
         # layout
+        splitter = QtGui.QSplitter(parent=self)
+        self.setCentralWidget(splitter)
+
         widg = QtGui.QWidget(parent=self)
         layout = QtGui.QVBoxLayout()
-
-        self.setCentralWidget(widg)
         widg.setLayout(layout)
-
         layout.addWidget(self.details)
         layout.addWidget(self.timeline)
 
+        splitter.addWidget(self.sequences)
+        splitter.addWidget(widg)
+        splitter.setSizes([200, 700])
+
         # menu
         menubar = self.menuBar()
 
@@ -63,6 +74,7 @@ def __init__(self, *args, **kwargs):
         file_menu.addAction(file_load)
 
         # signals
+        self.sequences.itemSelectionChanged.connect(self._change_sequence)
         self.timeline.selection_changed.connect(self.details.set_item)
 
     def _file_load(self):
@@ -96,7 +108,22 @@ def load(self, path):
         self._current_file = path
         self.setWindowTitle('OTIO viewer - ' + path)
         self.details.set_item(None)
-        self.timeline.set_timeline(otio.adapters.read_from_file(path))
+        self.sequences.clear()
+        file_contents = otio.adapters.read_from_file(path)
+        if isinstance(file_contents, otio.schema.Timeline):
+            self.timeline.set_timeline(file_contents)
+            self.sequences.setVisible(False)
+        elif isinstance(file_contents,
+                        otio.schema.SerializeableCollection):
+            for s in file_contents:
+                TimelineWidgetItem(s, s.name, self.sequences)
+            self.sequences.setVisible(True)
+            self.timeline.set_timeline(None)
+
+    def _change_sequence(self):
+        selection = self.sequences.selectedItems()
+        if selection:
+            self.timeline.set_timeline(selection[0].timeline)
 
 
 def main():
diff --git a/opentimelineio/adapters/fcp_xml.py b/opentimelineio/adapters/fcp_xml.py
index 94b8dab9b..57b99f7f1 100644
--- a/opentimelineio/adapters/fcp_xml.py
+++ b/opentimelineio/adapters/fcp_xml.py
@@ -25,6 +25,13 @@ def _url_to_path(url):
     return parsed.path
 
 
+def _populate_element_map(elem, element_map):
+    if 'id' in elem.attrib and list(elem):
+        element_map[elem.tag].setdefault(elem.attrib['id'], elem)
+    for sub_elem in elem:
+        _populate_element_map(sub_elem, element_map)
+
+
 def _resolved_backreference(elem, tag, element_map):
     if 'id' in elem.attrib:
         elem = element_map[tag].setdefault(elem.attrib['id'], elem)
@@ -32,6 +39,40 @@ def _resolved_backreference(elem, tag, element_map):
     return elem
 
 
+def _populate_backreference_map(item, br_map):
+    if isinstance(item, otio.media_reference.MediaReference):
+        tag = 'file'
+    elif isinstance(item, otio.schema.Sequence):
+        tag = 'sequence'
+    else:
+        tag = None
+
+    if isinstance(item, otio.media_reference.External):
+        item_hash = hash(str(item.target_url))
+    elif isinstance(item, otio.media_reference.MissingReference):
+        item_hash = 'missing_ref'
+    else:
+        item_hash = item.__hash__()
+
+    # skip unspecified tags
+    if tag is not None:
+        br_map[tag].setdefault(item_hash,
+                               1 if not br_map[tag] else
+                               max(br_map[tag].values()) + 1)
+
+    # populate children
+    if isinstance(item, otio.schema.Timeline):
+        for sub_item in item.tracks:
+            _populate_backreference_map(sub_item, br_map)
+    elif isinstance(item, (otio.schema.Clip,
+                           otio.schema.Gap,
+                           otio.schema.Transition)):
+        pass
+    else:
+        for sub_item in item:
+            _populate_backreference_map(sub_item, br_map)
+
+
 def _backreference_build(tag):
     # We can also encode these back-references if an item is accessed multiple
     # times. To do this we store an id attribute on the element. For back-
@@ -70,12 +111,13 @@ def _insert_new_sub_element(into_parent, tag, attrib=None, text=''):
     return elem
 
 
-def _get_single_sequence(tree):
-    top_level_sequences = tree.findall('.//project/children/sequence') + \
-        tree.findall('./sequence')
-    if len(top_level_sequences) > 1:
-        raise NotImplementedError('Multiple sequences are not supported')
-    return top_level_sequences[0]
+def _get_top_level_sequences(elem):
+    top_level_sequences = elem.findall('./sequence')
+    for sub_elem in elem:
+        if sub_elem.tag in ('sequence', 'clip'):
+            continue
+        top_level_sequences.extend(_get_top_level_sequences(sub_elem))
+    return top_level_sequences
 
 
 def _make_pretty_string(tree_e):
@@ -135,7 +177,8 @@ def _parse_media_reference(file_e, element_map):
     file_rate = _parse_rate(file_e, element_map)
     timecode_rate = _parse_rate(file_e.find('./timecode'), element_map)
     timecode_frame = int(file_e.find('./timecode/frame').text)
-    duration = int(file_e.find('./duration').text)
+    duration_e = file_e.find('./duration')
+    duration = int(duration_e.text) if duration_e is not None else 0
 
     available_range = otio.opentime.TimeRange(
         start_time=otio.opentime.RationalTime(timecode_frame, timecode_rate),
@@ -152,8 +195,18 @@ def _parse_clip_item_without_media(clip_item, sequence_rate,
                                    transition_offsets, element_map):
     markers = clip_item.findall('./marker')
     rate = _parse_rate(clip_item, element_map)
-    in_frame = int(clip_item.find('./in').text) + transition_offsets[0]
-    out_frame = int(clip_item.find('./out').text) - transition_offsets[1]
+
+    # transition offsets are provided in timeline rate. If they deviate they
+    # need to be rescaled to clip item rate
+    context_transition_offsets = [
+        transition_offsets[0].rescaled_to(rate),
+        transition_offsets[1].rescaled_to(rate)
+    ]
+
+    in_frame = int(clip_item.find('./in').text) + \
+        int(round(context_transition_offsets[0].value))
+    out_frame = int(clip_item.find('./out').text) - \
+        int(round(context_transition_offsets[1].value))
 
     source_range = otio.opentime.TimeRange(
         start_time=otio.opentime.RationalTime(in_frame, sequence_rate),
@@ -182,8 +235,17 @@ def _parse_clip_item(clip_item, transition_offsets, element_map):
     )
 
     src_rate = _parse_rate(clip_item.find('./file'), element_map)
-    in_frame = int(clip_item.find('./in').text) + transition_offsets[0]
-    out_frame = int(clip_item.find('./out').text) - transition_offsets[1]
+    # transition offsets are provided in timeline rate. If they deviate they
+    # need to be rescaled to clip item rate
+    context_transition_offsets = [
+        transition_offsets[0].rescaled_to(src_rate),
+        transition_offsets[1].rescaled_to(src_rate)
+    ]
+
+    in_frame = int(clip_item.find('./in').text) + \
+        int(round(context_transition_offsets[0].value))
+    out_frame = int(clip_item.find('./out').text) - \
+        int(round(context_transition_offsets[1].value))
 
     timecode = media_reference.available_range.start_time
     # source_start in xml is taken relative to the start of the media, whereas
@@ -231,8 +293,17 @@ def _parse_sequence_item(sequence_item, transition_offsets, element_map):
     sequence = _parse_sequence(sequence_item.find('./sequence'), element_map)
     source_rate = _parse_rate(sequence_item.find('./sequence'), element_map)
 
-    in_frame = int(sequence_item.find('./in').text) + transition_offsets[0]
-    out_frame = int(sequence_item.find('./out').text) - transition_offsets[1]
+    # transition offsets are provided in timeline rate. If they deviate they
+    # need to be rescaled to clip item rate
+    context_transition_offsets = [
+        transition_offsets[0].rescaled_to(source_rate),
+        transition_offsets[1].rescaled_to(source_rate)
+    ]
+
+    in_frame = int(sequence_item.find('./in').text) + \
+        int(round(context_transition_offsets[0].value))
+    out_frame = int(sequence_item.find('./out').text) - \
+        int(round(context_transition_offsets[1].value))
 
     sequence.source_range = otio.opentime.TimeRange(
         start_time=otio.opentime.RationalTime(in_frame, source_rate),
@@ -283,18 +354,19 @@ def _parse_track(track_e, kind, rate, element_map):
         # start time and end time on the timeline can be set to -1. This means
         # that there is a transition at that end of the clip-item. So the time
         # on the timeline has to be taken from that object.
-        transition_offsets = [0, 0]
+        transition_offsets = [otio.opentime.RationalTime(),
+                              otio.opentime.RationalTime()]
         if track_item.tag == 'clipitem':
             if start == -1:
                 in_transition = list(track_e)[clip_item_index - 1]
                 start = _get_transition_cut_point(in_transition)
-                transition_offsets[0] = \
-                    start - int(in_transition.find('./start').text)
+                transition_offsets[0] = otio.opentime.RationalTime(
+                    start - int(in_transition.find('./start').text), rate)
             if end == -1:
                 out_transition = list(track_e)[clip_item_index + 1]
                 end = _get_transition_cut_point(out_transition)
-                transition_offsets[1] = \
-                    int(out_transition.find('./end').text) - end
+                transition_offsets[1] = otio.opentime.RationalTime(
+                    int(out_transition.find('./end').text) - end, rate)
 
         # see if we need to add a gap before this clip-item
         gap_time = start - last_clip_end
@@ -348,6 +420,21 @@ def _parse_sequence(sequence, element_map):
     return stack
 
 
+def _parse_timeline(sequence, element_map):
+    sequence = _resolved_backreference(sequence, 'sequence', element_map)
+    sequence_rate = _parse_rate(sequence, element_map)
+    timeline = otio.schema.Timeline(name=sequence.find('./name').text)
+    timeline.global_start_time = otio.opentime.RationalTime(0, sequence_rate)
+    timeline.tracks = _parse_sequence(sequence, element_map)
+    return timeline
+
+
+def _parse_collection(sequences, element_map):
+    collection = otio.schema.SerializeableCollection(name='sequences')
+    collection.extend([_parse_timeline(s, element_map) for s in sequences])
+    return collection
+
+
 # ------------------------
 # building single sequence
 # ------------------------
@@ -369,8 +456,8 @@ def _build_item_timings(item_e, item, timeline_range, transition_offsets,
     # So we subtract the media timecode.
     source_start = item.source_range.start_time - timecode
     source_end = item.source_range.end_time_exclusive() - timecode
-    start = str(int(timeline_range.start_time.value))
-    end = str(int(timeline_range.end_time_exclusive().value))
+    start = '{:.0f}'.format(timeline_range.start_time.value)
+    end = '{:.0f}'.format(timeline_range.end_time_exclusive().value)
 
     if transition_offsets[0] is not None:
         start = '-1'
@@ -379,16 +466,18 @@ def _build_item_timings(item_e, item, timeline_range, transition_offsets,
         end = '-1'
         source_end += transition_offsets[1]
 
-    _insert_new_sub_element(item_e, 'duration',
-                            text=str(int(item.source_range.duration.value)))
+    _insert_new_sub_element(
+        item_e, 'duration',
+        text='{:.0f}'.format(item.source_range.duration.value)
+    )
     _insert_new_sub_element(item_e, 'start', text=start)
     _insert_new_sub_element(item_e, 'end', text=end)
 
     _insert_new_sub_element(item_e, 'in',
-                            text=str(int(source_start.value)))
+                            text='{:.0f}'.format(source_start.value))
 
     _insert_new_sub_element(item_e, 'out',
-                            text=str(int(source_end.value)))
+                            text='{:.0f}'.format(source_end.value))
 
 
 @_backreference_build('file')
@@ -410,8 +499,9 @@ def _build_file(media_reference, br_map):
 
     _insert_new_sub_element(file_e, 'name', text=os.path.basename(url_path))
     file_e.append(_build_rate(available_range.start_time))
-    _insert_new_sub_element(file_e, 'duration',
-                            text=str(available_range.duration.value))
+    _insert_new_sub_element(
+        file_e, 'duration',
+        text='{:.0f}'.format(available_range.duration.value))
     _insert_new_sub_element(file_e, 'pathurl', text=media_reference.target_url)
 
     # timecode
@@ -424,7 +514,9 @@ def _build_file(media_reference, br_map):
         'string',
         text=otio.opentime.to_timecode(timecode, rate=timecode.rate)
     )
-    _insert_new_sub_element(timecode_e, 'frame', text=str(int(timecode.value)))
+    _insert_new_sub_element(
+        timecode_e, 'frame', text='{:.0f}'.format(timecode.value)
+    )
     display_format = 'DF' if (math.ceil(timecode.rate) == 30 and
                               math.ceil(timecode.rate) != timecode.rate) \
         else 'NDF'
@@ -447,11 +539,13 @@ def _build_file(media_reference, br_map):
 def _build_transition_item(transition_item, timeline_range,
                            transition_offsets, br_map):
     transition_e = cElementTree.Element('transitionitem')
-    _insert_new_sub_element(transition_e, 'start',
-                            text=str(int(timeline_range.start_time.value)))
+    _insert_new_sub_element(
+        transition_e, 'start',
+        text='{:.0f}'.format(timeline_range.start_time.value)
+    )
     _insert_new_sub_element(
         transition_e, 'end',
-        text=str(int(timeline_range.end_time_exclusive().value))
+        text='{:.0f}'.format(timeline_range.end_time_exclusive().value)
     )
 
     if not transition_item.in_offset.value:
@@ -557,7 +651,7 @@ def _build_item(item, timeline_range, transition_offsets, br_map):
         raise ValueError('Unsupported item: ' + str(item))
 
 
-def _build_track(track, br_map):
+def _build_track(track, sequence_rate, br_map):
     track_e = cElementTree.Element('track')
 
     for n, item in enumerate(track):
@@ -581,6 +675,10 @@ def _build_track(track, br_map):
             transition_offsets[1] = None
 
         timeline_range = track.range_of_child_at_index(n)
+        timeline_range = otio.opentime.TimeRange(
+            timeline_range.start_time.rescaled_to(sequence_rate),
+            timeline_range.duration.rescaled_to(sequence_rate)
+        )
         track_e.append(
             _build_item(item, timeline_range, transition_offsets, br_map)
         )
@@ -596,8 +694,10 @@ def _build_marker(marker):
     _insert_new_sub_element(marker_e, 'comment', text=comment)
     _insert_new_sub_element(marker_e, 'name', text=marker.name)
-    _insert_new_sub_element(marker_e, 'in',
-                            text=str(int(marked_range.start_time.value)))
+    _insert_new_sub_element(
+        marker_e, 'in',
+        text='{:.0f}'.format(marked_range.start_time.value)
+    )
     _insert_new_sub_element(marker_e, 'out', text='-1')
 
     return marker_e
 
@@ -607,9 +707,12 @@
 def _build_sequence(stack, timeline_range, br_map):
     sequence_e = cElementTree.Element('sequence')
     _insert_new_sub_element(sequence_e, 'name', text=stack.name)
-    _insert_new_sub_element(sequence_e, 'duration',
-                            text=str(int(timeline_range.duration.value)))
+    _insert_new_sub_element(
+        sequence_e, 'duration',
+        text='{:.0f}'.format(timeline_range.duration.value)
+    )
     sequence_e.append(_build_rate(timeline_range.start_time))
+    sequence_rate = timeline_range.start_time.rate
 
     media_e = _insert_new_sub_element(sequence_e, 'media')
     video_e = _insert_new_sub_element(media_e, 'video')
@@ -617,9 +720,9 @@ def _build_sequence(stack, timeline_range, br_map):
 
     for track in stack:
         if track.kind == otio.schema.SequenceKind.Video:
-            video_e.append(_build_track(track, br_map))
+            video_e.append(_build_track(track, sequence_rate, br_map))
         elif track.kind == otio.schema.SequenceKind.Audio:
-            audio_e.append(_build_track(track, br_map))
+            audio_e.append(_build_track(track, sequence_rate, br_map))
 
     for marker in stack.markers:
         sequence_e.append(_build_marker(marker))
@@ -627,23 +730,40 @@ def _build_sequence(stack, timeline_range, br_map):
     return sequence_e
 
 
+def _build_collection(collection, br_map):
+    sequences = []
+    for item in collection:
+        if not isinstance(item, otio.schema.Timeline):
+            continue
+
+        timeline_range = otio.opentime.TimeRange(
+            start_time=item.global_start_time,
+            duration=item.duration()
+        )
+        sequences.append(_build_sequence(item.tracks, timeline_range, br_map))
+
+    return sequences
+
+
 # --------------------
 # adapter requirements
 # --------------------


 def read_from_string(input_str):
     tree = cElementTree.fromstring(input_str)
-    sequence = _get_single_sequence(tree)
     # element_map encodes the backreference context
     element_map = collections.defaultdict(dict)
+    _populate_element_map(tree, element_map)
 
-    sequence_rate = _parse_rate(sequence, element_map)
-    timeline = otio.schema.Timeline(name=sequence.find('./name').text)
-    timeline.global_start_time = otio.opentime.RationalTime(0, sequence_rate)
-    timeline.tracks = _parse_sequence(sequence, element_map)
+    top_level_sequences = _get_top_level_sequences(tree)
 
-    return timeline
+    if len(top_level_sequences) == 1:
+        return _parse_timeline(top_level_sequences[0], element_map)
+    elif len(top_level_sequences) > 1:
+        return _parse_collection(top_level_sequences, element_map)
+    else:
+        raise ValueError('No top-level sequences found')
 
 
 def write_to_string(input_otio):
@@ -652,14 +772,20 @@ def write_to_string(input_otio):
     _insert_new_sub_element(project_e, 'name', text=input_otio.name)
     children_e = _insert_new_sub_element(project_e, 'children')
 
-    timeline_range = otio.opentime.TimeRange(
-        start_time=input_otio.global_start_time,
-        duration=input_otio.duration()
-    )
-
     br_map = collections.defaultdict(dict)
-    children_e.append(
-        _build_sequence(input_otio.tracks, timeline_range, br_map)
-    )
+    _populate_backreference_map(input_otio, br_map)
+
+    if isinstance(input_otio, otio.schema.Timeline):
+        timeline_range = otio.opentime.TimeRange(
+            start_time=input_otio.global_start_time,
+            duration=input_otio.duration()
+        )
+        children_e.append(
+            _build_sequence(input_otio.tracks, timeline_range, br_map)
+        )
+    elif isinstance(input_otio, otio.schema.SerializeableCollection):
+        children_e.extend(
+            _build_collection(input_otio, br_map)
+        )
 
     return _make_pretty_string(tree_e)
diff --git a/opentimelineioViewWidget/timeline.py b/opentimelineioViewWidget/timeline.py
index 36eaaa494..95089b6d1 100644
--- a/opentimelineioViewWidget/timeline.py
+++ b/opentimelineioViewWidget/timeline.py
@@ -44,9 +44,19 @@ def itemChange(self, change, value):
         return super(BaseItem, self).itemChange(change, value)
 
     def _add_markers(self):
+        source_range = (self.item.source_range.start_time,
+                        self.item.source_range.end_time_exclusive())
+
         for m in self.item.markers:
+            marked_time = m.marked_range.start_time
+            if marked_time < source_range[0] or marked_time > source_range[1]:
+                continue
             marker = Marker(m, None, None)
             marker.setY(0.5 * MARKER_SIZE)
+            marker.setX(
+                (otio.opentime.to_seconds(m.marked_range.start_time) -
+                 otio.opentime.to_seconds(source_range[0])) * TIME_MULTIPLIER
+            )
             marker.setParentItem(self)
 
     def _position_labels(self):
@@ -250,8 +260,6 @@ def __init__(self, marker, *args, **kwargs):
         self.setBrush(
             QtGui.QBrush(QtGui.QColor(121, 212, 177, 255))
         )
-        self.setX(otio.opentime.to_seconds(
-            self.item.marked_range.start_time) * TIME_MULTIPLIER)
 
     def paint(self, *args, **kwargs):
         new_args = [args[0], QtGui.QStyleOptionGraphicsItem()] + list(args[2:])
@@ -328,9 +336,11 @@ def _add_track(self, track, y_pos):
 
     def _add_tracks(self):
         video_tracks = [t for t in self.stack
-                        if t.kind == otio.schema.SequenceKind.Video]
+                        if t.kind == otio.schema.SequenceKind.Video
+                        and list(t)]
         audio_tracks = [t for t in self.stack
-                        if t.kind == otio.schema.SequenceKind.Audio]
+                        if t.kind == otio.schema.SequenceKind.Audio
+                        and list(t)]
 
         video_tracks.reverse()
         video_tracks_top = TIME_SLIDER_HEIGHT
@@ -348,6 +358,8 @@ def _add_tracks(self):
 
     def _add_markers(self):
         for m in self.stack.markers:
             marker = Marker(m, None, self)
+            marker.setX(otio.opentime.to_seconds(
+                m.marked_range.start_time) * TIME_MULTIPLIER)
             marker.setY(TIME_SLIDER_HEIGHT - MARKER_SIZE)
             self.addItem(marker)
@@ -420,7 +432,8 @@ def set_timeline(self, timeline):
 
         # load new timeline
         self.timeline = timeline
-        self.add_stack(timeline.tracks)
+        if timeline is not None:
+            self.add_stack(timeline.tracks)
 
     def add_stack(self, stack):
         tab_index = next((i for i in range(self.count())
diff --git a/tests/test_fcp7_xml_adapter.py b/tests/test_fcp7_xml_adapter.py
index 5103c9e5e..3e19feebd 100644
--- a/tests/test_fcp7_xml_adapter.py
+++ b/tests/test_fcp7_xml_adapter.py
@@ -141,7 +141,7 @@ def test_backreference_generator_read(self):
         adapt_mod = otio.adapters.from_name('fcp_xml').module()
 
         tree = cElementTree.fromstring(text)
-        sequence = adapt_mod._get_single_sequence(tree)
+        sequence = adapt_mod._get_top_level_sequences(tree)[0]
 
         # make sure that element_map gets populated by the function calls in
         # the way we want