diff --git a/README.md b/README.md
index 3b4b7dae51..43d47a424c 100644
--- a/README.md
+++ b/README.md
@@ -69,6 +69,9 @@ adapter plugins. This includes Final Cut Pro XML, AAF, CMX 3600 EDL, and more.
For more information about this, including supported formats, see: https://opentimelineio.readthedocs.io/en/latest/tutorials/adapters.html
+The **AAF Adapter** has been **relocated** to a separate repository:
+https://github.com/OpenTimelineIO/otio-aaf-adapter
+
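+Once that package is installed (for example via `pip install otio-aaf-adapter`;
+see that repository's README for details), AAF files can still be read and
+written through the regular adapter API:
+
+```python
+import opentimelineio as otio
+
+timeline = otio.adapters.read_from_file("example.aaf")
+```
+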
Other Plugins
-------------
diff --git a/contrib/opentimelineio_contrib/adapters/aaf_adapter/__init__.py b/contrib/opentimelineio_contrib/adapters/aaf_adapter/__init__.py
deleted file mode 100644
index 686a8cb5fd..0000000000
--- a/contrib/opentimelineio_contrib/adapters/aaf_adapter/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright Contributors to the OpenTimelineIO project
diff --git a/contrib/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py b/contrib/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py
deleted file mode 100644
index 63e6749e39..0000000000
--- a/contrib/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py
+++ /dev/null
@@ -1,776 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright Contributors to the OpenTimelineIO project
-
-"""AAF Adapter Transcriber
-
-Specifies how to transcribe an OpenTimelineIO file into an AAF file.
-"""
-
-import aaf2
-import abc
-import uuid
-import opentimelineio as otio
-import os
-import copy
-import re
-
-
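-# Well-known AAF/Avid identifiers for the parameter, operation and
-# extrapolation definitions used by this writer.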
-AAF_PARAMETERDEF_PAN = aaf2.auid.AUID("e4962322-2267-11d3-8a4c-0050040ef7d2")
-AAF_OPERATIONDEF_MONOAUDIOPAN = aaf2.auid.AUID("9d2ea893-0968-11d3-8a38-0050040ef7d2")
-AAF_PARAMETERDEF_AVIDPARAMETERBYTEORDER = uuid.UUID(
- "c0038672-a8cf-11d3-a05b-006094eb75cb")
-AAF_PARAMETERDEF_AVIDEFFECTID = uuid.UUID(
- "93994bd6-a81d-11d3-a05b-006094eb75cb")
-AAF_PARAMETERDEF_AFX_FG_KEY_OPACITY_U = uuid.UUID(
- "8d56813d-847e-11d5-935a-50f857c10000")
-AAF_PARAMETERDEF_LEVEL = uuid.UUID("e4962320-2267-11d3-8a4c-0050040ef7d2")
-AAF_VVAL_EXTRAPOLATION_ID = uuid.UUID("0e24dd54-66cd-4f1a-b0a0-670ac3a7a0b3")
-AAF_OPERATIONDEF_SUBMASTER = uuid.UUID("f1db0f3d-8d64-11d3-80df-006008143e6f")
-
-
-def _is_considered_gap(thing):
- """Returns whether or not thiing can be considered gap.
-
- TODO: turns generators w/ kind "Slug" inito gap. Should probably generate
- opaque black instead.
- """
- if isinstance(thing, otio.schema.Gap):
- return True
-
- if (
- isinstance(thing, otio.schema.Clip)
- and isinstance(
- thing.media_reference,
- otio.schema.GeneratorReference)
- ):
- if thing.media_reference.generator_kind in ("Slug",):
- return True
- else:
- raise otio.exceptions.NotSupportedError(
- "AAF adapter does not support generator references of kind"
- " '{}'".format(thing.media_reference.generator_kind)
- )
-
- return False
-
-
-class AAFAdapterError(otio.exceptions.OTIOError):
- pass
-
-
-class AAFValidationError(AAFAdapterError):
- pass
-
-
-class AAFFileTranscriber:
- """
- AAFFileTranscriber
-
- AAFFileTranscriber manages the file-level knowledge during a conversion from
- otio to aaf. This includes keeping track of unique tapemobs and mastermobs.
- """
-
- def __init__(self, input_otio, aaf_file, **kwargs):
- """
- AAFFileTranscriber requires an input timeline and an output pyaaf2 file handle.
-
- Args:
- input_otio: an input OpenTimelineIO timeline
- aaf_file: a pyaaf2 file handle to an output file
- """
- self.aaf_file = aaf_file
- self.compositionmob = self.aaf_file.create.CompositionMob()
- self.compositionmob.name = input_otio.name
- self.compositionmob.usage = "Usage_TopLevel"
- self.aaf_file.content.mobs.append(self.compositionmob)
- self._unique_mastermobs = {}
- self._unique_tapemobs = {}
- self._clip_mob_ids_map = _gather_clip_mob_ids(input_otio, **kwargs)
-
- def _unique_mastermob(self, otio_clip):
- """Get a unique mastermob, identified by clip metadata mob id."""
- mob_id = self._clip_mob_ids_map.get(otio_clip)
- mastermob = self._unique_mastermobs.get(mob_id)
- if not mastermob:
- mastermob = self.aaf_file.create.MasterMob()
- mastermob.name = otio_clip.name
- mastermob.mob_id = aaf2.mobid.MobID(mob_id)
- self.aaf_file.content.mobs.append(mastermob)
- self._unique_mastermobs[mob_id] = mastermob
- return mastermob
-
- def _unique_tapemob(self, otio_clip):
- """Get a unique tapemob, identified by clip metadata mob id."""
- mob_id = self._clip_mob_ids_map.get(otio_clip)
- tapemob = self._unique_tapemobs.get(mob_id)
- if not tapemob:
- tapemob = self.aaf_file.create.SourceMob()
- tapemob.name = otio_clip.name
- tapemob.descriptor = self.aaf_file.create.ImportDescriptor()
- # If the edit_rate is not an integer, we need
- # to use drop frame with a nominal integer fps.
- edit_rate = otio_clip.visible_range().duration.rate
- timecode_fps = round(edit_rate)
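- # e.g. an edit_rate of 29.97 rounds to a timecode_fps of 30, enabling
- # drop_frame below; an integral rate such as 24 leaves it disabled.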
- tape_timecode_slot = tapemob.create_timecode_slot(
- edit_rate=edit_rate,
- timecode_fps=timecode_fps,
- drop_frame=(edit_rate != timecode_fps)
- )
- timecode_start = int(
- otio_clip.media_reference.available_range.start_time.value
- )
- timecode_length = int(
- otio_clip.media_reference.available_range.duration.value
- )
-
- tape_timecode_slot.segment.start = int(timecode_start)
- tape_timecode_slot.segment.length = int(timecode_length)
- self.aaf_file.content.mobs.append(tapemob)
- self._unique_tapemobs[mob_id] = tapemob
- return tapemob
-
- def track_transcriber(self, otio_track):
- """Return an appropriate _TrackTranscriber given an otio track."""
- if otio_track.kind == otio.schema.TrackKind.Video:
- transcriber = VideoTrackTranscriber(self, otio_track)
- elif otio_track.kind == otio.schema.TrackKind.Audio:
- transcriber = AudioTrackTranscriber(self, otio_track)
- else:
- raise otio.exceptions.NotSupportedError(
- f"Unsupported track kind: {otio_track.kind}")
- return transcriber
-
-
-def validate_metadata(timeline):
- """Print a check of necessary metadata requirements for an otio timeline."""
-
- all_checks = [__check(timeline, "duration().rate")]
- edit_rate = __check(timeline, "duration().rate").value
-
- for child in timeline.find_children():
- checks = []
- if _is_considered_gap(child):
- checks = [
- __check(child, "duration().rate").equals(edit_rate)
- ]
- if isinstance(child, otio.schema.Clip):
- checks = [
- __check(child, "duration().rate").equals(edit_rate),
- __check(child, "media_reference.available_range.duration.rate"
- ).equals(edit_rate),
- __check(child, "media_reference.available_range.start_time.rate"
- ).equals(edit_rate)
- ]
- if isinstance(child, otio.schema.Transition):
- checks = [
- __check(child, "duration().rate").equals(edit_rate),
- __check(child, "metadata['AAF']['PointList']"),
- __check(child, "metadata['AAF']['OperationGroup']['Operation']"
- "['DataDefinition']['Name']"),
- __check(child, "metadata['AAF']['OperationGroup']['Operation']"
- "['Description']"),
- __check(child, "metadata['AAF']['OperationGroup']['Operation']"
- "['Name']"),
- __check(child, "metadata['AAF']['CutPoint']")
- ]
- all_checks.extend(checks)
-
- if any(check.errors for check in all_checks):
- raise AAFValidationError("\n" + "\n".join(
- sum([check.errors for check in all_checks], [])))
-
-
-def _gather_clip_mob_ids(input_otio,
- prefer_file_mob_id=False,
- use_empty_mob_ids=False,
- **kwargs):
- """
- Create a dictionary mapping otio clips to their corresponding mob ids.
- """
-
- def _from_clip_metadata(clip):
- """Get the MobID from the clip.metadata."""
- return clip.metadata.get("AAF", {}).get("SourceID")
-
- def _from_media_reference_metadata(clip):
- """Get the MobID from the media_reference.metadata."""
- return (clip.media_reference.metadata.get("AAF", {}).get("MobID") or
- clip.media_reference.metadata.get("AAF", {}).get("SourceID"))
-
- def _from_aaf_file(clip):
- """ Get the MobID from the AAF file itself."""
- mob_id = None
- target_url = clip.media_reference.target_url
- if os.path.isfile(target_url) and target_url.endswith("aaf"):
- with aaf2.open(clip.media_reference.target_url) as aaf_file:
- mastermobs = list(aaf_file.content.mastermobs())
- if len(mastermobs) == 1:
- mob_id = mastermobs[0].mob_id
- return mob_id
-
- def _generate_empty_mobid(clip):
- """Generate a meaningless MobID."""
- return aaf2.mobid.MobID.new()
-
- strategies = [
- _from_clip_metadata,
- _from_media_reference_metadata,
- _from_aaf_file
- ]
-
- if prefer_file_mob_id:
- strategies.remove(_from_aaf_file)
- strategies.insert(0, _from_aaf_file)
-
- if use_empty_mob_ids:
- strategies.append(_generate_empty_mobid)
-
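- # The strategies above are tried in order for each clip: with
- # prefer_file_mob_id=True the AAF file itself is consulted first, and
- # with use_empty_mob_ids=True a freshly generated MobID is the last resort.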
- clip_mob_ids = {}
-
- for otio_clip in input_otio.find_clips():
- if _is_considered_gap(otio_clip):
- continue
- for strategy in strategies:
- mob_id = strategy(otio_clip)
- if mob_id:
- clip_mob_ids[otio_clip] = mob_id
- break
- else:
- raise AAFAdapterError(f"Cannot find mob ID for clip {otio_clip}")
-
- return clip_mob_ids
-
-
-def _stackify_nested_groups(timeline):
- """
- Ensure that all nesting in a given timeline is in a stack container.
- This conforms with how AAF thinks about nesting: there needs
- to be an outer container, even if it holds just one object.
- """
- copied = copy.deepcopy(timeline)
- for track in copied.tracks:
- for i, child in enumerate(track.find_children()):
- is_nested = isinstance(child, otio.schema.Track)
- is_parent_in_stack = isinstance(child.parent(), otio.schema.Stack)
- if is_nested and not is_parent_in_stack:
- stack = otio.schema.Stack()
- track.remove(child)
- stack.append(child)
- track.insert(i, stack)
- return copied
-
-
-class _TrackTranscriber(abc.ABC):
- """
- _TrackTranscriber is the base class for the conversion of a given otio track.
-
- _TrackTranscriber is not meant to be used by itself. It provides the common
- functionality to inherit from. We need an abstract base class because Audio and
- Video are handled differently.
- """
-
- def __init__(self, root_file_transcriber, otio_track):
- """
- _TrackTranscriber
-
- Args:
- root_file_transcriber: the corresponding 'parent' AAFFileTranscriber object
- otio_track: the given otio_track to convert
- """
- self.root_file_transcriber = root_file_transcriber
- self.compositionmob = root_file_transcriber.compositionmob
- self.aaf_file = root_file_transcriber.aaf_file
- self.otio_track = otio_track
- self.edit_rate = self.otio_track.find_children()[0].duration().rate
- self.timeline_mobslot, self.sequence = self._create_timeline_mobslot()
- self.timeline_mobslot.name = self.otio_track.name
-
- def transcribe(self, otio_child):
- """Transcribe otio child to corresponding AAF object"""
- if _is_considered_gap(otio_child):
- filler = self.aaf_filler(otio_child)
- return filler
- elif isinstance(otio_child, otio.schema.Transition):
- transition = self.aaf_transition(otio_child)
- return transition
- elif isinstance(otio_child, otio.schema.Clip):
- source_clip = self.aaf_sourceclip(otio_child)
- return source_clip
- elif isinstance(otio_child, otio.schema.Track):
- sequence = self.aaf_sequence(otio_child)
- return sequence
- elif isinstance(otio_child, otio.schema.Stack):
- operation_group = self.aaf_operation_group(otio_child)
- return operation_group
- else:
- raise otio.exceptions.NotSupportedError(
- f"Unsupported otio child type: {type(otio_child)}")
-
- @property
- @abc.abstractmethod
- def media_kind(self):
- """Return the string for what kind of track this is."""
- pass
-
- @property
- @abc.abstractmethod
- def _master_mob_slot_id(self):
- """
- Return the MasterMob Slot ID for the corresponding track media kind
- """
- # MasterMob's and MasterMob slots have to be unique. We handle unique
- # MasterMob's with _unique_mastermob(). We also need to protect against
- # duplicate MasterMob slots. As of now, we mandate all picture clips to
- # be created in MasterMob slot 1 and all sound clips to be created in
- # MasterMob slot 2. While this is a little inadequate, it works for now
- pass
-
- @abc.abstractmethod
- def _create_timeline_mobslot(self):
- """
- Return a timeline_mobslot and sequence for this track.
-
- In AAF, a TimelineMobSlot is a container for the Sequence. A Sequence is
- analogous to an otio track.
-
- Returns:
- Returns a tuple of (TimelineMobSlot, Sequence)
- """
- pass
-
- @abc.abstractmethod
- def default_descriptor(self, otio_clip):
- pass
-
- @abc.abstractmethod
- def _transition_parameters(self):
- pass
-
- def aaf_filler(self, otio_gap):
- """Convert an otio Gap into an aaf Filler"""
- length = int(otio_gap.visible_range().duration.value)
- filler = self.aaf_file.create.Filler(self.media_kind, length)
- return filler
-
- def aaf_sourceclip(self, otio_clip):
- """Convert an otio Clip into an aaf SourceClip"""
- tapemob, tapemob_slot = self._create_tapemob(otio_clip)
- filemob, filemob_slot = self._create_filemob(otio_clip, tapemob, tapemob_slot)
- mastermob, mastermob_slot = self._create_mastermob(otio_clip,
- filemob,
- filemob_slot)
-
- # We need both a start and a length for the composition's SourceClip.
- # `start` is the offset of the clip's in point from the start of its
- # available media.
-
- offset = (otio_clip.visible_range().start_time -
- otio_clip.available_range().start_time)
- start = offset.value
- length = otio_clip.visible_range().duration.value
-
- compmob_clip = self.compositionmob.create_source_clip(
- slot_id=self.timeline_mobslot.slot_id,
- # XXX: Python3 requires these to be passed as explicit ints
- start=int(start),
- length=int(length),
- media_kind=self.media_kind
- )
- compmob_clip.mob = mastermob
- compmob_clip.slot = mastermob_slot
- compmob_clip.slot_id = mastermob_slot.slot_id
- return compmob_clip
-
- def aaf_transition(self, otio_transition):
- """Convert an otio Transition into an aaf Transition"""
- if (otio_transition.transition_type !=
- otio.schema.TransitionTypes.SMPTE_Dissolve):
- print(
- "Unsupported transition type: {}".format(
- otio_transition.transition_type))
- return None
-
- transition_params, varying_value = self._transition_parameters()
-
- interpolation_def = self.aaf_file.create.InterpolationDef(
- aaf2.misc.LinearInterp, "LinearInterp", "Linear keyframe interpolation")
- self.aaf_file.dictionary.register_def(interpolation_def)
- varying_value["Interpolation"].value = (
- self.aaf_file.dictionary.lookup_interperlationdef("LinearInterp"))
-
- pointlist = otio_transition.metadata["AAF"]["PointList"]
-
- c1 = self.aaf_file.create.ControlPoint()
- c1["EditHint"].value = "Proportional"
- c1.value = pointlist[0]["Value"]
- c1.time = pointlist[0]["Time"]
-
- c2 = self.aaf_file.create.ControlPoint()
- c2["EditHint"].value = "Proportional"
- c2.value = pointlist[1]["Value"]
- c2.time = pointlist[1]["Time"]
-
- varying_value["PointList"].extend([c1, c2])
-
- op_group_metadata = otio_transition.metadata["AAF"]["OperationGroup"]
- effect_id = op_group_metadata["Operation"].get("Identification")
- is_time_warp = op_group_metadata["Operation"].get("IsTimeWarp")
- by_pass = op_group_metadata["Operation"].get("Bypass")
- number_inputs = op_group_metadata["Operation"].get("NumberInputs")
- operation_category = op_group_metadata["Operation"].get("OperationCategory")
- data_def_name = op_group_metadata["Operation"]["DataDefinition"]["Name"]
- data_def = self.aaf_file.dictionary.lookup_datadef(str(data_def_name))
- description = op_group_metadata["Operation"]["Description"]
- op_def_name = otio_transition.metadata["AAF"][
- "OperationGroup"
- ]["Operation"]["Name"]
-
- # Create OperationDefinition
- op_def = self.aaf_file.create.OperationDef(uuid.UUID(effect_id), op_def_name)
- self.aaf_file.dictionary.register_def(op_def)
- op_def.media_kind = self.media_kind
- datadef = self.aaf_file.dictionary.lookup_datadef(self.media_kind)
- op_def["IsTimeWarp"].value = is_time_warp
- op_def["Bypass"].value = by_pass
- op_def["NumberInputs"].value = number_inputs
- op_def["OperationCategory"].value = str(operation_category)
- op_def["ParametersDefined"].extend(transition_params)
- op_def["DataDefinition"].value = data_def
- op_def["Description"].value = str(description)
-
- # Create OperationGroup
- length = int(otio_transition.duration().value)
- operation_group = self.aaf_file.create.OperationGroup(op_def, length)
- operation_group["DataDefinition"].value = datadef
- operation_group["Parameters"].append(varying_value)
-
- # Create Transition
- transition = self.aaf_file.create.Transition(self.media_kind, length)
- transition["OperationGroup"].value = operation_group
- transition["CutPoint"].value = otio_transition.metadata["AAF"]["CutPoint"]
- transition["DataDefinition"].value = datadef
- return transition
-
- def aaf_sequence(self, otio_track):
- """Convert an otio Track into an aaf Sequence"""
- sequence = self.aaf_file.create.Sequence(media_kind=self.media_kind)
- length = 0
- for nested_otio_child in otio_track:
- result = self.transcribe(nested_otio_child)
- length += result.length
- sequence.components.append(result)
- sequence.length = length
- return sequence
-
- def aaf_operation_group(self, otio_stack):
- """
- Create and return an OperationGroup which will contain other AAF objects
- to support OTIO nesting
- """
- # Create OperationDefinition
- op_def = self.aaf_file.create.OperationDef(AAF_OPERATIONDEF_SUBMASTER,
- "Submaster")
- self.aaf_file.dictionary.register_def(op_def)
- op_def.media_kind = self.media_kind
- datadef = self.aaf_file.dictionary.lookup_datadef(self.media_kind)
-
- # These values are necessary for pyaaf2 OperationDefinitions
- op_def["IsTimeWarp"].value = False
- op_def["Bypass"].value = 0
- op_def["NumberInputs"].value = -1
- op_def["OperationCategory"].value = "OperationCategory_Effect"
- op_def["DataDefinition"].value = datadef
-
- # Create OperationGroup
- operation_group = self.aaf_file.create.OperationGroup(op_def)
- operation_group.media_kind = self.media_kind
- operation_group["DataDefinition"].value = datadef
-
- length = 0
- for nested_otio_child in otio_stack:
- result = self.transcribe(nested_otio_child)
- length += result.length
- operation_group.segments.append(result)
- operation_group.length = length
- return operation_group
-
- def _create_tapemob(self, otio_clip):
- """
- Return a physical sourcemob for an otio Clip based on the MobID.
-
- Returns:
- Returns a tuple of (TapeMob, TapeMobSlot)
- """
- tapemob = self.root_file_transcriber._unique_tapemob(otio_clip)
- tapemob_slot = tapemob.create_empty_slot(self.edit_rate, self.media_kind)
- tapemob_slot.segment.length = int(
- otio_clip.media_reference.available_range.duration.value)
- return tapemob, tapemob_slot
-
- def _create_filemob(self, otio_clip, tapemob, tapemob_slot):
- """
- Return a file sourcemob for an otio Clip. Needs a tapemob and tapemob slot.
-
- Returns:
- Returns a tuple of (FileMob, FileMobSlot)
- """
- filemob = self.aaf_file.create.SourceMob()
- self.aaf_file.content.mobs.append(filemob)
-
- filemob.descriptor = self.default_descriptor(otio_clip)
- filemob_slot = filemob.create_timeline_slot(self.edit_rate)
- filemob_clip = filemob.create_source_clip(
- slot_id=filemob_slot.slot_id,
- length=tapemob_slot.segment.length,
- media_kind=tapemob_slot.segment.media_kind)
- filemob_clip.mob = tapemob
- filemob_clip.slot = tapemob_slot
- filemob_clip.slot_id = tapemob_slot.slot_id
- filemob_slot.segment = filemob_clip
- return filemob, filemob_slot
-
- def _create_mastermob(self, otio_clip, filemob, filemob_slot):
- """
- Return a mastermob for an otio Clip. Needs a filemob and filemob slot.
-
- Returns:
- Returns a tuple of (MasterMob, MasterMobSlot)
- """
- mastermob = self.root_file_transcriber._unique_mastermob(otio_clip)
- timecode_length = int(otio_clip.media_reference.available_range.duration.value)
-
- try:
- mastermob_slot = mastermob.slot_at(self._master_mob_slot_id)
- except IndexError:
- mastermob_slot = (
- mastermob.create_timeline_slot(edit_rate=self.edit_rate,
- slot_id=self._master_mob_slot_id))
- mastermob_clip = mastermob.create_source_clip(
- slot_id=mastermob_slot.slot_id,
- length=timecode_length,
- media_kind=self.media_kind)
- mastermob_clip.mob = filemob
- mastermob_clip.slot = filemob_slot
- mastermob_clip.slot_id = filemob_slot.slot_id
- mastermob_slot.segment = mastermob_clip
- return mastermob, mastermob_slot
-
-
-class VideoTrackTranscriber(_TrackTranscriber):
- """Video track kind specialization of TrackTranscriber."""
-
- @property
- def media_kind(self):
- return "picture"
-
- @property
- def _master_mob_slot_id(self):
- return 1
-
- def _create_timeline_mobslot(self):
- """
- Create a Sequence container (TimelineMobSlot) and Sequence.
-
- TimelineMobSlot --> Sequence
- """
- timeline_mobslot = self.compositionmob.create_timeline_slot(
- edit_rate=self.edit_rate)
- sequence = self.aaf_file.create.Sequence(media_kind=self.media_kind)
- timeline_mobslot.segment = sequence
- return timeline_mobslot, sequence
-
- def default_descriptor(self, otio_clip):
- # TODO: Determine if these values are correct, and if so,
- # maybe they should be in the AAF metadata
- descriptor = self.aaf_file.create.CDCIDescriptor()
- descriptor["ComponentWidth"].value = 8
- descriptor["HorizontalSubsampling"].value = 2
- descriptor["ImageAspectRatio"].value = "16/9"
- descriptor["StoredWidth"].value = 1920
- descriptor["StoredHeight"].value = 1080
- descriptor["FrameLayout"].value = "FullFrame"
- descriptor["VideoLineMap"].value = [42, 0]
- descriptor["SampleRate"].value = 24
- descriptor["Length"].value = 1
- return descriptor
-
- def _transition_parameters(self):
- """
- Return video transition parameters
- """
- # Create ParameterDef for AvidParameterByteOrder
- byteorder_typedef = self.aaf_file.dictionary.lookup_typedef("aafUInt16")
- param_byteorder = self.aaf_file.create.ParameterDef(
- AAF_PARAMETERDEF_AVIDPARAMETERBYTEORDER,
- "AvidParameterByteOrder",
- "",
- byteorder_typedef)
- self.aaf_file.dictionary.register_def(param_byteorder)
-
- # Create ParameterDef for AvidEffectID
- avid_effect_typdef = self.aaf_file.dictionary.lookup_typedef("AvidBagOfBits")
- param_effect_id = self.aaf_file.create.ParameterDef(
- AAF_PARAMETERDEF_AVIDEFFECTID,
- "AvidEffectID",
- "",
- avid_effect_typdef)
- self.aaf_file.dictionary.register_def(param_effect_id)
-
- # Create ParameterDef for AFX_FG_KEY_OPACITY_U
- opacity_param_def = self.aaf_file.dictionary.lookup_typedef("Rational")
- opacity_param = self.aaf_file.create.ParameterDef(
- AAF_PARAMETERDEF_AFX_FG_KEY_OPACITY_U,
- "AFX_FG_KEY_OPACITY_U",
- "",
- opacity_param_def)
- self.aaf_file.dictionary.register_def(opacity_param)
-
- # Create VaryingValue
- opacity_u = self.aaf_file.create.VaryingValue()
- opacity_u.parameterdef = self.aaf_file.dictionary.lookup_parameterdef(
- "AFX_FG_KEY_OPACITY_U")
- opacity_u["VVal_Extrapolation"].value = AAF_VVAL_EXTRAPOLATION_ID
- opacity_u["VVal_FieldCount"].value = 1
-
- return [param_byteorder, param_effect_id], opacity_u
-
-
-class AudioTrackTranscriber(_TrackTranscriber):
- """Audio track kind specialization of TrackTranscriber."""
-
- @property
- def media_kind(self):
- return "sound"
-
- @property
- def _master_mob_slot_id(self):
- return 2
-
- def aaf_sourceclip(self, otio_clip):
- # Parameter Definition
- typedef = self.aaf_file.dictionary.lookup_typedef("Rational")
- param_def = self.aaf_file.create.ParameterDef(AAF_PARAMETERDEF_PAN,
- "Pan",
- "Pan",
- typedef)
- self.aaf_file.dictionary.register_def(param_def)
- interp_def = self.aaf_file.create.InterpolationDef(aaf2.misc.LinearInterp,
- "LinearInterp",
- "LinearInterp")
- self.aaf_file.dictionary.register_def(interp_def)
- # PointList
- length = int(otio_clip.duration().value)
- c1 = self.aaf_file.create.ControlPoint()
- c1["ControlPointSource"].value = 2
- c1["Time"].value = aaf2.rational.AAFRational(f"0/{length}")
- c1["Value"].value = 0
- c2 = self.aaf_file.create.ControlPoint()
- c2["ControlPointSource"].value = 2
- c2["Time"].value = aaf2.rational.AAFRational(f"{length - 1}/{length}")
- c2["Value"].value = 0
- varying_value = self.aaf_file.create.VaryingValue()
- varying_value.parameterdef = param_def
- varying_value["Interpolation"].value = interp_def
- varying_value["PointList"].extend([c1, c2])
- opgroup = self.timeline_mobslot.segment
- opgroup.parameters.append(varying_value)
-
- return super().aaf_sourceclip(otio_clip)
-
- def _create_timeline_mobslot(self):
- """
- Create a Sequence container (TimelineMobSlot) and Sequence.
- Sequence needs to be in an OperationGroup.
-
- TimelineMobSlot --> OperationGroup --> Sequence
- """
- # TimelineMobSlot
- timeline_mobslot = self.compositionmob.create_sound_slot(
- edit_rate=self.edit_rate)
- # OperationDefinition
- opdef = self.aaf_file.create.OperationDef(AAF_OPERATIONDEF_MONOAUDIOPAN,
- "Audio Pan")
- opdef.media_kind = self.media_kind
- opdef["NumberInputs"].value = 1
- self.aaf_file.dictionary.register_def(opdef)
- # OperationGroup
- total_length = int(sum([t.duration().value for t in self.otio_track]))
- opgroup = self.aaf_file.create.OperationGroup(opdef)
- opgroup.media_kind = self.media_kind
- opgroup.length = total_length
- timeline_mobslot.segment = opgroup
- # Sequence
- sequence = self.aaf_file.create.Sequence(media_kind=self.media_kind)
- sequence.length = total_length
- opgroup.segments.append(sequence)
- return timeline_mobslot, sequence
-
- def default_descriptor(self, otio_clip):
- descriptor = self.aaf_file.create.PCMDescriptor()
- descriptor["AverageBPS"].value = 96000
- descriptor["BlockAlign"].value = 2
- descriptor["QuantizationBits"].value = 16
- descriptor["AudioSamplingRate"].value = 48000
- descriptor["Channels"].value = 1
- descriptor["SampleRate"].value = 48000
- descriptor["Length"].value = int(
- otio_clip.media_reference.available_range.duration.value
- )
- return descriptor
-
- def _transition_parameters(self):
- """
- Return audio transition parameters
- """
- # Create ParameterDef for ParameterDef_Level
- def_level_typedef = self.aaf_file.dictionary.lookup_typedef("Rational")
- param_def_level = self.aaf_file.create.ParameterDef(AAF_PARAMETERDEF_LEVEL,
- "ParameterDef_Level",
- "",
- def_level_typedef)
- self.aaf_file.dictionary.register_def(param_def_level)
-
- # Create VaryingValue
- level = self.aaf_file.create.VaryingValue()
- level.parameterdef = (
- self.aaf_file.dictionary.lookup_parameterdef("ParameterDef_Level"))
-
- return [param_def_level], level
-
-
-class __check:
- """
- __check is a private helper class that safely gets values given to check
- for existence and equality
- """
-
- def __init__(self, obj, tokenpath):
- self.orig = obj
- self.value = obj
- self.errors = []
- self.tokenpath = tokenpath
- try:
- for token in re.split(r"[\.\[]", tokenpath):
- if token.endswith("()"):
- self.value = getattr(self.value, token.replace("()", ""))()
- elif "]" in token:
- self.value = self.value[token.strip("[]'\"")]
- else:
- self.value = getattr(self.value, token)
- except Exception as e:
- self.value = None
- self.errors.append("{}{} {}.{} does not exist, {}".format(
- self.orig.name if hasattr(self.orig, "name") else "",
- type(self.orig),
- type(self.orig).__name__,
- self.tokenpath, e))
-
- def equals(self, val):
- """Check if the retrieved value is equal to a given value."""
- if self.value is not None and self.value != val:
- self.errors.append(
- "{}{} {}.{} not equal to {} (expected) != {} (actual)".format(
- self.orig.name if hasattr(self.orig, "name") else "",
- type(self.orig),
- type(self.orig).__name__, self.tokenpath, val, self.value))
- return self
diff --git a/contrib/opentimelineio_contrib/adapters/advanced_authoring_format.py b/contrib/opentimelineio_contrib/adapters/advanced_authoring_format.py
deleted file mode 100644
index 41feb2bbd7..0000000000
--- a/contrib/opentimelineio_contrib/adapters/advanced_authoring_format.py
+++ /dev/null
@@ -1,1622 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright Contributors to the OpenTimelineIO project
-
-"""OpenTimelineIO Advanced Authoring Format (AAF) Adapter
-
-Depending on if/where PyAAF is installed, you may need to set this env var:
- OTIO_AAF_PYTHON_LIB - should point at the PyAAF module.
-"""
-import colorsys
-import copy
-import numbers
-import os
-import sys
-
-import collections
-import fractions
-import opentimelineio as otio
-
-lib_path = os.environ.get("OTIO_AAF_PYTHON_LIB")
-if lib_path and lib_path not in sys.path:
- sys.path.insert(0, lib_path)
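-# e.g. OTIO_AAF_PYTHON_LIB=/path/containing/aaf2 makes the `import aaf2`
-# below work without pyaaf2 being installed on the default path.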
-
-import aaf2 # noqa: E402
-import aaf2.content # noqa: E402
-import aaf2.mobs # noqa: E402
-import aaf2.components # noqa: E402
-import aaf2.core # noqa: E402
-import aaf2.misc # noqa: E402
-from opentimelineio_contrib.adapters.aaf_adapter import aaf_writer # noqa: E402
-
-
-debug = False
-
-# If enabled, output recursive traversal info of _transcribe() method.
-_TRANSCRIBE_DEBUG = False
-
-# If enabled, bake keyframed parameter values for the owner's time range.
-_BAKE_KEYFRAMED_PROPERTIES_VALUES = False
-
-_PROPERTY_INTERPOLATION_MAP = {
- aaf2.misc.ConstantInterp: "Constant",
- aaf2.misc.LinearInterp: "Linear",
- aaf2.misc.BezierInterpolator: "Bezier",
- aaf2.misc.CubicInterpolator: "Cubic",
-}
-
-
-def _transcribe_log(s, indent=0, always_print=False):
- if always_print or _TRANSCRIBE_DEBUG:
- print("{}{}".format(" " * indent, s))
-
-
-class AAFAdapterError(otio.exceptions.OTIOError):
- """ Raised for AAF adatper-specific errors. """
-
-
-def _get_parameter(item, parameter_name):
- values = {value.name: value for value in item.parameters.value}
- return values.get(parameter_name)
-
-
-def _encoded_name(item):
-
- name = _get_name(item)
- return name.encode("utf-8", "replace")
-
-
-def _get_name(item):
- if isinstance(item, aaf2.components.SourceClip):
- try:
- return item.mob.name or "Untitled SourceClip"
- except AttributeError:
- # Some AAFs produce this error:
- # RuntimeError: failed with [-2146303738]: mob not found
- return "SourceClip Missing Mob"
- if hasattr(item, 'name'):
- name = item.name
- if name:
- return name
- return _get_class_name(item)
-
-
-def _get_class_name(item):
- if hasattr(item, "class_name"):
- return item.class_name
- else:
- return item.__class__.__name__
-
-
-def _transcribe_property(prop, owner=None):
- if isinstance(prop, (str, numbers.Integral, float, dict)):
- return prop
- elif isinstance(prop, set):
- return list(prop)
- elif isinstance(prop, list):
- result = {}
- for child in prop:
- if hasattr(child, "name"):
- if isinstance(child, aaf2.misc.VaryingValue):
- # keyframed values
- control_points = []
- for control_point in child["PointList"]:
- try:
- # Some values cannot be transcribed yet
- control_points.append(
- [
- control_point.time,
- _transcribe_property(control_point.value),
- ]
- )
- except TypeError:
- _transcribe_log(
- "Unable to transcribe value for property: "
- "'{}' (Type: '{}', Parent: '{}')".format(
- child.name, type(child), prop
- )
- )
-
- # bake keyframe values for owner time range
- baked_values = None
- if _BAKE_KEYFRAMED_PROPERTIES_VALUES:
- if isinstance(owner, aaf2.components.Component):
- baked_values = []
- for t in range(0, owner.length):
- baked_values.append([t, child.value_at(t)])
- else:
- _transcribe_log(
- "Unable to bake values for property: "
- "'{}'. Owner: {}, Control Points: {}".format(
- child.name, owner, control_points
- )
- )
-
- value_dict = {
- "_aaf_keyframed_property": True,
- "keyframe_values": control_points,
- "keyframe_interpolation": _PROPERTY_INTERPOLATION_MAP.get(
- child.interpolationdef.auid, "Linear"
- ),
- "keyframe_baked_values": baked_values
- }
- result[child.name] = value_dict
-
- elif hasattr(child, "value"):
- # static value
- result[child.name] = _transcribe_property(child.value, owner=owner)
- else:
- # @TODO: There may be more properties that we also want.
- # If you want to see what is being skipped, turn on debug.
- if debug:
- debug_message = "Skipping unrecognized property: '{}', parent '{}'"
- _transcribe_log(debug_message.format(child, prop))
- return result
- elif hasattr(prop, "properties"):
- result = {}
- for child in prop.properties():
- result[child.name] = _transcribe_property(child.value, owner=owner)
- return result
- else:
- return str(prop)
-
-
-def _otio_color_from_hue(hue):
- """Return an OTIO marker color, based on hue in range of [0.0, 1.0].
-
- Args:
- hue (float): marker color hue value
-
- Returns:
- otio.schema.MarkerColor: converted / estimated marker color
-
- """
- if hue <= 0.04 or hue > 0.93:
- return otio.schema.MarkerColor.RED
- if hue <= 0.13:
- return otio.schema.MarkerColor.ORANGE
- if hue <= 0.2:
- return otio.schema.MarkerColor.YELLOW
- if hue <= 0.43:
- return otio.schema.MarkerColor.GREEN
- if hue <= 0.52:
- return otio.schema.MarkerColor.CYAN
- if hue <= 0.74:
- return otio.schema.MarkerColor.BLUE
- if hue <= 0.82:
- return otio.schema.MarkerColor.PURPLE
- return otio.schema.MarkerColor.MAGENTA
-
-
-def _marker_color_from_string(color):
- """Tries to derive a valid marker color from a string.
-
- Args:
- color (str): color name (e.g. "Yellow")
-
- Returns:
- otio.schema.MarkerColor: matching color or `None`
- """
- if not color:
- return
-
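- # e.g. "cyan" -> otio.schema.MarkerColor.CYAN; unknown names yield None.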
- return getattr(otio.schema.MarkerColor, color.upper(), None)
-
-
-def _convert_rgb_to_marker_color(rgb_dict):
- """Returns a matching OTIO marker color for a given AAF color string.
-
- Adapted from `get_nearest_otio_color()` in the `xges.py` adapter.
-
- Args:
- rgb_dict (dict): marker color as dict,
- e.g. `"{'red': 41471, 'green': 12134, 'blue': 6564}"`
-
- Returns:
- otio.schema.MarkerColor: converted / estimated marker color
-
- """
-
- float_colors = {
- (1.0, 0.0, 0.0): otio.schema.MarkerColor.RED,
- (0.0, 1.0, 0.0): otio.schema.MarkerColor.GREEN,
- (0.0, 0.0, 1.0): otio.schema.MarkerColor.BLUE,
- (0.0, 0.0, 0.0): otio.schema.MarkerColor.BLACK,
- (1.0, 1.0, 1.0): otio.schema.MarkerColor.WHITE,
- }
-
- # convert from UInt to float
- red = float(rgb_dict["red"]) / 65535.0
- green = float(rgb_dict["green"]) / 65535.0
- blue = float(rgb_dict["blue"]) / 65535.0
- rgb_float = (red, green, blue)
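- # e.g. {'red': 65535, 'green': 0, 'blue': 0} becomes (1.0, 0.0, 0.0),
- # which matches RED exactly below.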
-
- # check for exact match
- marker_color = float_colors.get(rgb_float)
- if marker_color:
- return marker_color
-
- # try to get an approximate match based on hue
- hue, lightness, saturation = colorsys.rgb_to_hls(red, green, blue)
- nearest = None
- if saturation < 0.2:
- if lightness > 0.65:
- nearest = otio.schema.MarkerColor.WHITE
- else:
- nearest = otio.schema.MarkerColor.BLACK
- if nearest is None:
- if lightness < 0.13:
- nearest = otio.schema.MarkerColor.BLACK
- if lightness > 0.9:
- nearest = otio.schema.MarkerColor.WHITE
- if nearest is None:
- nearest = _otio_color_from_hue(hue)
- if nearest == otio.schema.MarkerColor.RED and lightness > 0.53:
- nearest = otio.schema.MarkerColor.PINK
- if (
- nearest == otio.schema.MarkerColor.MAGENTA
- and hue < 0.89
- and lightness < 0.42
- ):
- # some darker magentas look more like purple
- nearest = otio.schema.MarkerColor.PURPLE
-
- # default to red color
- return nearest or otio.schema.MarkerColor.RED
-
-
-def _find_timecode_mobs(item):
- mobs = [item.mob]
-
- for c in item.walk():
- if isinstance(c, aaf2.components.SourceClip):
- mob = c.mob
- if mob:
- mobs.append(mob)
- else:
- continue
- else:
- # This could be 'EssenceGroup', 'Pulldown' or other segment
- # subclasses
- # For example:
- # An EssenceGroup is a Segment that has one or more
- # alternate choices, each of which represent different variations
- # of one actual piece of content.
- # According to the AAF Object Specification and Edit Protocol
- # documents:
- # "Typically the different representations vary in essence format,
- # compression, or frame size. The application is responsible for
- # choosing the appropriate implementation of the essence."
- # It also says they should all have the same length, but
- # there might be nested Sequences inside which we're not attempting
- # to handle here (yet). We'll need a concrete example to ensure
- # we're doing the right thing.
- # TODO: Is the Timecode for an EssenceGroup correct?
- # TODO: Try CountChoices() and ChoiceAt(i)
- # For now, lets just skip it.
- continue
-
- return mobs
-
-
-def timecode_values_are_same(timecodes):
- """
- A SourceClip can have multiple timecode objects (for example an auxTC24
- value that got added via the Avid Bin column). As long as they have the
- same start and length values, they can be treated as being the same.
- """
- if len(timecodes) == 1:
- return True
-
- start_set = set()
- length_set = set()
-
- for timecode in timecodes:
- start_set.add(timecode.getvalue('Start'))
- length_set.add(timecode.getvalue('Length'))
-
- # If all timecode objects have the same start and length we can consider
- # them equivalent.
- if len(start_set) == 1 and len(length_set) == 1:
- return True
-
- return False
-
-
-def _extract_timecode_info(mob):
- """Given a mob with a single timecode slot, return the timecode and length
- in that slot as a tuple
- """
- timecodes = [slot.segment for slot in mob.slots
- if isinstance(slot.segment, aaf2.components.Timecode)]
-
- # Only use timecode if we have just one or multiple ones with same
- # start/length.
- if timecode_values_are_same(timecodes):
- timecode = timecodes[0]
- timecode_start = timecode.getvalue('Start')
- timecode_length = timecode.getvalue('Length')
-
- if timecode_start is None or timecode_length is None:
- raise otio.exceptions.NotSupportedError(
- "Unexpected timecode value(s) in mob named: `{}`."
- " `Start`: {}, `Length`: {}".format(mob.name,
- timecode_start,
- timecode_length)
- )
-
- return timecode_start, timecode_length
- elif len(timecodes) > 1:
- raise otio.exceptions.NotSupportedError(
- "Error: mob has more than one timecode slot with different values."
- " This is currently not supported by the AAF adapter. Found:"
- " {} slots, mob name is: '{}'".format(len(timecodes), mob.name)
- )
- else:
- return None
-
-
-def _add_child(parent, child, source):
- if child is None:
- if debug:
- print(f"Adding null child? {source}")
- elif isinstance(child, otio.schema.Marker):
- parent.markers.append(child)
- else:
- parent.append(child)
-
-
-def _transcribe(item, parents, edit_rate, indent=0):
- result = None
- metadata = {}
-
- # First lets grab some standard properties that are present on
- # many types of AAF objects...
- metadata["Name"] = _get_name(item)
- metadata["ClassName"] = _get_class_name(item)
-
- # Some AAF objects (like TimelineMobSlot) have an edit rate
- # which should be used for all of the object's children.
- # We will pass it on to any recursive calls to _transcribe()
- if hasattr(item, "edit_rate"):
- edit_rate = float(item.edit_rate)
-
- if isinstance(item, aaf2.components.Component):
- metadata["Length"] = item.length
-
- if isinstance(item, aaf2.core.AAFObject):
- for prop in item.properties():
- if hasattr(prop, 'name') and hasattr(prop, 'value'):
- key = str(prop.name)
- value = prop.value
- metadata[key] = _transcribe_property(value, owner=item)
-
- # Now we will use the item's class to determine which OTIO type
- # to transcribe into. Note that the order of this if/elif/... chain
- # is important, because the class hierarchy of AAF objects is more
- # complex than OTIO's.
-
- if isinstance(item, aaf2.content.ContentStorage):
- msg = f"Creating SerializableCollection for {_encoded_name(item)}"
- _transcribe_log(msg, indent)
- result = otio.schema.SerializableCollection()
-
- for mob in item.compositionmobs():
- _transcribe_log("compositionmob traversal", indent)
- child = _transcribe(mob, parents + [item], edit_rate, indent + 2)
- _add_child(result, child, mob)
-
- elif isinstance(item, aaf2.mobs.Mob):
- _transcribe_log(f"Creating Timeline for {_encoded_name(item)}", indent)
- result = otio.schema.Timeline()
-
- for slot in item.slots:
- track = _transcribe(slot, parents + [item], edit_rate, indent + 2)
- _add_child(result.tracks, track, slot)
-
- # Use a heuristic to find the starting timecode from
- # this track and use it for the Timeline's global_start_time
- start_time = _find_timecode_track_start(track)
- if start_time:
- result.global_start_time = start_time
-
- elif isinstance(item, aaf2.components.SourceClip):
- clipUsage = None
- if item.mob is not None:
- clipUsage = item.mob.usage
-
- if clipUsage:
- itemMsg = "Creating SourceClip for {} ({})".format(
- _encoded_name(item), clipUsage
- )
- else:
- itemMsg = f"Creating SourceClip for {_encoded_name(item)}"
-
- _transcribe_log(itemMsg, indent)
- result = otio.schema.Clip()
-
- # store source mob usage to allow OTIO pipelines to adapt downstream
- # example: pipeline code adjusting source_range and name for subclips only
- metadata["SourceMobUsage"] = clipUsage or ""
-
- # Evidently the last mob is the one with the timecode
- mobs = _find_timecode_mobs(item)
-
- # Get the Timecode start and length values
- last_mob = mobs[-1] if mobs else None
- timecode_info = _extract_timecode_info(last_mob) if last_mob else None
-
- source_start = int(metadata.get("StartTime", "0"))
- source_length = item.length
- media_start = source_start
- media_length = item.length
-
- if timecode_info:
- media_start, media_length = timecode_info
- source_start += media_start
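- # StartTime is relative to the media, so offsetting it by the
- # timecode start yields an absolute position that matches the
- # media's available_range below.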
-
- # The goal here is to find a source range. Actual editorial opinions are
- # found on SourceClips in the CompositionMobs. To figure out whether this
- # clip is directly in the CompositionMob, we detect if our parent mobs
- # are only CompositionMobs. If they were anything else - a MasterMob, a
- # SourceMob, we would know that this is in some indirect relationship.
- parent_mobs = filter(lambda parent: isinstance(parent, aaf2.mobs.Mob), parents)
- is_directly_in_composition = all(
- isinstance(mob, aaf2.mobs.CompositionMob)
- for mob in parent_mobs
- )
- if is_directly_in_composition:
- result.source_range = otio.opentime.TimeRange(
- otio.opentime.RationalTime(source_start, edit_rate),
- otio.opentime.RationalTime(source_length, edit_rate)
- )
-
- # The goal here is to find an available range. Media ranges are stored
- # in the related MasterMob, and there should only be one - hence the name
- # "Master" mob. Somewhere down our chain (either a child or our parents)
- # is a MasterMob.
- # There are some special cases where the masterMob could be:
- # 1) For SourceClips in the CompositionMob, the mob the SourceClip is
- # referencing can be our MasterMob.
- # 2) If the source clip is referencing another CompositionMob,
- # drill down to see if the composition holds the MasterMob
- # 3) For everything else, it is a previously encountered parent. Find the
- # MasterMob in our chain, and then extract the information from that.
-
- child_mastermob, composition_user_metadata = \
- _find_mastermob_for_sourceclip(item)
-
- if composition_user_metadata:
- metadata['UserComments'] = composition_user_metadata
-
- parent_mastermobs = [
- parent for parent in parents
- if isinstance(parent, aaf2.mobs.MasterMob)
- ]
- parent_mastermob = parent_mastermobs[0] if parent_mastermobs else None
-
- if child_mastermob:
- _transcribe_log("[found child_mastermob]", indent)
- elif parent_mastermob:
- _transcribe_log("[found parent_mastermob]", indent)
- else:
- _transcribe_log("[found no mastermob]", indent)
-
- mastermob = child_mastermob or parent_mastermob or None
-
- if mastermob:
- # Get target path
- mastermob_child = _transcribe(mastermob, list(), edit_rate, indent)
-
- target_path = (mastermob_child.metadata.get("AAF", {})
- .get("UserComments", {})
- .get("UNC Path"))
- if not target_path:
- # retrieve the locator from the MasterMob's Essence
- for mobslot in mastermob.slots:
- if isinstance(mobslot.segment, aaf2.components.SourceClip):
- sourcemob = mobslot.segment.mob
- locator = None
- # different essences store locators in different places
- if (isinstance(sourcemob.descriptor,
- aaf2.essence.DigitalImageDescriptor)
- and sourcemob.descriptor.locator):
- locator = sourcemob.descriptor.locator[0]
- elif "Locator" in sourcemob.descriptor:
- locator = sourcemob.descriptor["Locator"].value[0]
-
- if locator:
- target_path = locator["URLString"].value
-
- # if we have a target path, create an ExternalReference, otherwise
- # create a MissingReference.
- if target_path:
- if not target_path.startswith("file://"):
- target_path = "file://" + target_path
- target_path = target_path.replace("\\", "/")
- media = otio.schema.ExternalReference(target_url=target_path)
- else:
- media = otio.schema.MissingReference()
-
- media.available_range = otio.opentime.TimeRange(
- otio.opentime.RationalTime(media_start, edit_rate),
- otio.opentime.RationalTime(media_length, edit_rate)
- )
-
- # Copy the metadata from the master into the media_reference
- clip_metadata = copy.deepcopy(mastermob_child.metadata.get("AAF", {}))
-
- # If the composition was holding UserComments and the current masterMob has
- # no UserComments, use the ones from the CompositionMob. But if the
- # masterMob has any, prefer them over the compositionMob, since the
- # masterMob is the ultimate authority for a source clip.
- if composition_user_metadata:
- if "UserComments" not in clip_metadata:
- clip_metadata['UserComments'] = composition_user_metadata
-
- media.metadata["AAF"] = clip_metadata
-
- result.media_reference = media
-
- elif isinstance(item, aaf2.components.Transition):
- _transcribe_log("Creating Transition for {}".format(
- _encoded_name(item)), indent)
- result = otio.schema.Transition()
-
- # Does AAF support anything else?
- result.transition_type = otio.schema.TransitionTypes.SMPTE_Dissolve
-
- # Extract value and time attributes of both ControlPoints used for
- # creating AAF Transition objects
- varying_value = None
- for param in item.getvalue('OperationGroup').parameters:
- if isinstance(param, aaf2.misc.VaryingValue):
- varying_value = param
- break
-
- if varying_value is not None:
- for control_point in varying_value.getvalue('PointList'):
- value = control_point.value
- time = control_point.time
- metadata.setdefault('PointList', []).append({'Value': value,
- 'Time': time})
-
- in_offset = int(metadata.get("CutPoint", "0"))
- out_offset = item.length - in_offset
- result.in_offset = otio.opentime.RationalTime(in_offset, edit_rate)
- result.out_offset = otio.opentime.RationalTime(out_offset, edit_rate)
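- # e.g. a 24-frame dissolve with CutPoint 12 yields
- # in_offset == out_offset == 12 frames.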
-
- elif isinstance(item, aaf2.components.Filler):
- _transcribe_log(f"Creating Gap for {_encoded_name(item)}", indent)
- result = otio.schema.Gap()
-
- length = item.length
- result.source_range = otio.opentime.TimeRange(
- otio.opentime.RationalTime(0, edit_rate),
- otio.opentime.RationalTime(length, edit_rate)
- )
-
- elif isinstance(item, aaf2.components.NestedScope):
- msg = f"Creating Stack for NestedScope for {_encoded_name(item)}"
- _transcribe_log(msg, indent)
- # TODO: Is this the right class?
- result = otio.schema.Stack()
-
- for slot in item.slots:
- child = _transcribe(slot, parents + [item], edit_rate, indent + 2)
- _add_child(result, child, slot)
-
- elif isinstance(item, aaf2.components.Sequence):
- msg = f"Creating Track for Sequence for {_encoded_name(item)}"
- _transcribe_log(msg, indent)
- result = otio.schema.Track()
-
- # if the parent is a Sequence or NestedScope, add SlotID /
- # PhysicalTrackNumber so markers can be attached
- parent = parents[-1]
- if isinstance(parent, (aaf2.components.Sequence, aaf2.components.NestedScope)):
- timeline_slots = [
- p for p in parents if isinstance(p, aaf2.mobslots.TimelineMobSlot)
- ]
- timeline_slot = timeline_slots[-1]
- if timeline_slot:
- metadata["PhysicalTrackNumber"] = list(parent.slots).index(item) + 1
- metadata["SlotID"] = int(timeline_slot["SlotID"].value)
-
- for component in item.components:
- child = _transcribe(component, parents + [item], edit_rate, indent + 2)
- _add_child(result, child, component)
-
- elif isinstance(item, aaf2.components.OperationGroup):
- msg = f"Creating operationGroup for {_encoded_name(item)}"
- _transcribe_log(msg, indent)
- result = _transcribe_operation_group(item, parents, metadata,
- edit_rate, indent + 2)
-
- elif isinstance(item, aaf2.mobslots.TimelineMobSlot):
- msg = f"Creating Track for TimelineMobSlot for {_encoded_name(item)}"
- _transcribe_log(msg, indent)
- result = otio.schema.Track()
-
- child = _transcribe(item.segment, parents + [item], edit_rate, indent + 2)
-
- _add_child(result, child, item.segment)
-
- elif isinstance(item, aaf2.mobslots.MobSlot):
- msg = f"Creating Track for MobSlot for {_encoded_name(item)}"
- _transcribe_log(msg, indent)
- result = otio.schema.Track()
-
- child = _transcribe(item.segment, parents + [item], edit_rate, indent + 2)
- _add_child(result, child, item.segment)
-
- elif isinstance(item, aaf2.components.Timecode):
- pass
-
- elif isinstance(item, aaf2.components.Pulldown):
- pass
-
- elif isinstance(item, aaf2.components.EdgeCode):
- pass
-
- elif isinstance(item, aaf2.components.ScopeReference):
- msg = f"Creating Gap for ScopedReference for {_encoded_name(item)}"
- _transcribe_log(msg, indent)
- # TODO: is this like FILLER?
-
- result = otio.schema.Gap()
-
- length = item.length
- result.source_range = otio.opentime.TimeRange(
- otio.opentime.RationalTime(0, edit_rate),
- otio.opentime.RationalTime(length, edit_rate)
- )
-
- elif isinstance(item, aaf2.components.DescriptiveMarker):
- event_mobs = [p for p in parents if isinstance(p, aaf2.mobslots.EventMobSlot)]
- if event_mobs:
- _transcribe_log(
- f"Create marker for '{_encoded_name(item)}'", indent
- )
-
- result = otio.schema.Marker()
- result.name = metadata["Comment"]
-
- event_mob = event_mobs[-1]
-
- metadata["AttachedSlotID"] = int(metadata["DescribedSlots"][0])
- metadata["AttachedPhysicalTrackNumber"] = int(
- event_mob["PhysicalTrackNumber"].value
- )
-
- # determine marker color
- color = _marker_color_from_string(
- metadata.get("CommentMarkerAttributeList", {}).get("_ATN_CRM_COLOR")
- )
- if color is None:
- color = _convert_rgb_to_marker_color(
- metadata["CommentMarkerColor"]
- )
- result.color = color
-
- position = metadata["Position"]
-
- # Length can be None, but the property will always exist
- # so get('Length', 1) wouldn't help.
- length = metadata["Length"]
- if length is None:
- length = 1
-
- result.marked_range = otio.opentime.TimeRange(
- start_time=otio.opentime.from_frames(position, edit_rate),
- duration=otio.opentime.from_frames(length, edit_rate),
- )
- else:
- _transcribe_log(
- "Cannot attach marker item '{}'. "
- "Missing event mob in hierarchy.".format(
- _encoded_name(item)
- )
- )
-
- elif isinstance(item, aaf2.components.Selector):
- msg = f"Transcribe selector for {_encoded_name(item)}"
- _transcribe_log(msg, indent)
-
- selected = item.getvalue('Selected')
- alternates = item.getvalue('Alternates', None)
-
- # First we check to see if the Selected component is either a Filler
- # or ScopeReference object, meaning we have to use the alternate instead
- if isinstance(selected, aaf2.components.Filler) or \
- isinstance(selected, aaf2.components.ScopeReference):
-
- # Safety check of the alternates list, then transcribe first object -
- # there should only ever be one alternate in this situation
- if alternates is None or len(alternates) != 1:
- err = "AAF Selector parsing error: object has unexpected number of " \
- "alternates - {}".format(len(alternates))
- raise AAFAdapterError(err)
- result = _transcribe(alternates[0], parents + [item], edit_rate, indent + 2)
-
- # Filler/ScopeReference means the clip is muted/not enabled
- result.enabled = False
-
- # Muted tracks are handled in a slightly odd way so we need to do a
- # check here and pass the param back up to the track object
- # if isinstance(parents[-1], aaf2.mobslots.TimelineMobSlot):
- # pass # TODO: Figure out mechanism for passing this up to parent
-
- else:
-
- # This is most likely a multi-cam clip
- result = _transcribe(selected, parents + [item], edit_rate, indent + 2)
-
- # Perform a check here to make sure no potential Gap objects
- # are slipping through the cracks
- if isinstance(result, otio.schema.Gap):
- err = f"AAF Selector parsing error: {type(item)}"
- raise AAFAdapterError(err)
-
- # A Selector can have a set of alternates to handle multiple options for an
- # editorial decision - we do a full parse on those objects too
- if alternates is not None:
- alternates = [
- _transcribe(alt, parents + [item], edit_rate, indent + 2)
- for alt in alternates
- ]
-
- metadata['alternates'] = alternates
-
- # @TODO: There are a bunch of other AAF object types that we will
- # likely need to add support for. I'm leaving this code here to help
- # future efforts to extract the useful information out of these.
-
- # elif isinstance(item, aaf.storage.File):
- # self.extendChildItems([item.header])
-
- # elif isinstance(item, aaf.storage.Header):
- # self.extendChildItems([item.storage()])
- # self.extendChildItems([item.dictionary()])
-
- # elif isinstance(item, aaf.dictionary.Dictionary):
- # l = []
- # l.append(DummyItem(list(item.class_defs()), 'ClassDefs'))
- # l.append(DummyItem(list(item.codec_defs()), 'CodecDefs'))
- # l.append(DummyItem(list(item.container_defs()), 'ContainerDefs'))
- # l.append(DummyItem(list(item.data_defs()), 'DataDefs'))
- # l.append(DummyItem(list(item.interpolation_defs()),
- # 'InterpolationDefs'))
- # l.append(DummyItem(list(item.klvdata_defs()), 'KLVDataDefs'))
- # l.append(DummyItem(list(item.operation_defs()), 'OperationDefs'))
- # l.append(DummyItem(list(item.parameter_defs()), 'ParameterDefs'))
- # l.append(DummyItem(list(item.plugin_defs()), 'PluginDefs'))
- # l.append(DummyItem(list(item.taggedvalue_defs()), 'TaggedValueDefs'))
- # l.append(DummyItem(list(item.type_defs()), 'TypeDefs'))
- # self.extendChildItems(l)
- #
- # elif isinstance(item, pyaaf.AxSelector):
- # self.extendChildItems(list(item.EnumAlternateSegments()))
- #
- # elif isinstance(item, pyaaf.AxScopeReference):
- # #print item, item.GetRelativeScope(),item.GetRelativeSlot()
- # pass
- #
- # elif isinstance(item, pyaaf.AxEssenceGroup):
- # segments = []
- #
- # for i in xrange(item.CountChoices()):
- # choice = item.GetChoiceAt(i)
- # segments.append(choice)
- # self.extendChildItems(segments)
- #
- # elif isinstance(item, pyaaf.AxProperty):
- # self.properties['Value'] = str(item.GetValue())
-
- elif isinstance(item, collections.abc.Iterable):
- msg = "Creating SerializableCollection for Iterable for {}".format(
- _encoded_name(item))
- _transcribe_log(msg, indent)
-
- result = otio.schema.SerializableCollection()
- for child in item:
- result.append(_transcribe(child, parents + [item], edit_rate, indent + 2))
- else:
- # For everything else, we just ignore it.
- # To see what is being ignored, turn on the debug flag
- if debug:
- print(f"SKIPPING: {type(item)}: {item} -- {result}")
-
- # Did we get anything? If not, we're done
- if result is None:
- return None
-
- # Okay, now we've turned the AAF thing into an OTIO result
- # There's a bit more we can do before we're ready to return the result.
-
- # If we didn't get a name yet, use the one we have in metadata
- if not result.name:
- result.name = metadata["Name"]
-
- # Attach the AAF metadata
- if not result.metadata:
- result.metadata.clear()
- result.metadata["AAF"] = metadata
-
- # Double check that we got the length we expected
- if isinstance(result, otio.core.Item):
- length = metadata.get("Length")
- if (
- length
- and result.source_range is not None
- and result.source_range.duration.value != length
- ):
- raise AAFAdapterError(
- "Wrong duration? {} should be {} in {}".format(
- result.source_range.duration.value,
- length,
- result
- )
- )
-
- # Did we find a Track?
- if isinstance(result, otio.schema.Track):
- # Try to figure out the kind of Track it is
- if hasattr(item, 'media_kind'):
- media_kind = str(item.media_kind)
- result.metadata["AAF"]["MediaKind"] = media_kind
- if media_kind == "Picture":
- result.kind = otio.schema.TrackKind.Video
- elif media_kind in ("SoundMasterTrack", "Sound"):
- result.kind = otio.schema.TrackKind.Audio
- else:
- # Timecode, Edgecode, others?
- result.kind = ""
-
- # Done!
- return result
-
-
-def _find_timecode_track_start(track):
- # See if we can find a starting timecode in here...
- aaf_metadata = track.metadata.get("AAF", {})
-
- # Is this a Timecode track?
- if aaf_metadata.get("MediaKind") not in {"Timecode", "LegacyTimecode"}:
- return
-
- # Edit Protocol section 3.6 specifies PhysicalTrackNumber of 1 as the
- # Primary timecode
- try:
- physical_track_number = aaf_metadata["PhysicalTrackNumber"]
- except KeyError:
- raise AAFAdapterError("Timecode missing 'PhysicalTrackNumber'")
-
- if physical_track_number != 1:
- return
-
- try:
- edit_rate = fractions.Fraction(aaf_metadata["EditRate"])
- start = aaf_metadata["Segment"]["Start"]
- except KeyError as e:
- raise AAFAdapterError(
- f"Timecode missing '{e}'"
- )
-
- if edit_rate.denominator == 1:
- rate = edit_rate.numerator
- else:
- rate = float(edit_rate)
-
- return otio.opentime.RationalTime(
- value=int(start),
- rate=rate,
- )
-
-
-def _find_mastermob_for_sourceclip(aaf_sourceclip):
- """
- For a given source clip, find the related masterMob.
- Returns a tuple of (MasterMob, compositionMetadata), where
- MasterMob is an AAF MOB object and compositionMetadata is a
- dictionary extracted from the AAF Tagged Values of UserComments
- (i.e. user metadata).
- """
-
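- # Two shapes we expect to encounter (sketch):
- #
- #   SourceClip.mob -> MasterMob                           (simple case)
- #   SourceClip.mob -> CompositionMob -> ... -> MasterMob  (indirection)
-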
- # If the source clip's mob is already a MasterMob, just return it; we are done.
- if isinstance(aaf_sourceclip.mob, aaf2.mobs.MasterMob):
- return aaf_sourceclip.mob, None
-
- # There are cases where a composition mob is used as an indirection
- # to the mastermob. In that case the SourceClip points to a
- # CompositionMob instead of a MasterMob. Drill down into the CompositionMob
- # to find the MasterMob related to this SourceClip
- return _get_master_mob_from_source_composition(aaf_sourceclip.mob)
-
-
-def _get_master_mob_from_source_composition(compositionMob):
- """
- This code covers two special cases:
-
- First, if the passed-in source clip mob is a CompositionMob, drill down
- and try to find the MasterMob inside that composition.
-
- Second, there seems to be a workflow where metadata (specifically
- UserComments) is shared between SourceClips via a CompositionMob, in which
- case there are no UserComments on the MasterMob (where we would expect
- them in the default case). So if we find UserComments on the
- CompositionMob but not on the MasterMob, we return that metadata so it can
- be added to the clip instead of the MasterMob's UserComments.
- """
-
- # If not a composition, we can't discover anything
- if not isinstance(compositionMob, aaf2.mobs.CompositionMob):
- return None, None
-
- compositionMetadata = _get_composition_user_comments(compositionMob)
-
- # Iterate over the TimelineMobSlots and extract any source_clips we find.
- source_clips = []
- for slot in compositionMob.slots:
- if isinstance(slot, aaf2.mobslots.TimelineMobSlot):
- if isinstance(slot.segment, aaf2.components.SourceClip):
- source_clips.append(slot.segment)
-
- # No source clips, no master mob. But we still want to return
- # the composition metadata. If there is another mastermob found 'upstream',
- # but it does not have any UserComments metadata, we still want to use
- # the CompositionMob's metadata.
- if not source_clips:
- return None, compositionMetadata
-
- # Only expect one source clip for this case.
- # Are there cases where we can have more than one?
- if len(source_clips) > 1:
- print("Found more than one Source Clip ({}) for sourceClipComposition case. "
- "This is unexpected".format(len(source_clips)))
-
- # We only look at the first source clip right now...
- source_clip = source_clips[0]
-
- # Not referencing a master mob? Nothing to return
- if not isinstance(source_clip.mob, aaf2.mobs.MasterMob):
- return None, compositionMetadata
-
- # Found a master mob, return this and also compositionMetadata (if we found any)
- return (source_clip.mob, compositionMetadata)
-
-
-def _get_composition_user_comments(compositionMob):
- compositionMetadata = {}
-
- if not isinstance(compositionMob, aaf2.mobs.CompositionMob):
- return compositionMetadata
-
- compositionMobUserComments = list(compositionMob.get("UserComments", []))
- for prop in compositionMobUserComments:
- key = str(prop.name)
- value = prop.value
- compositionMetadata[key] = _transcribe_property(value)
-
- return compositionMetadata
-
-
-def _transcribe_linear_timewarp(item, parameters):
- # this is a linear time warp
- effect = otio.schema.LinearTimeWarp()
-
- offset_map = _get_parameter(item, 'PARAM_SPEED_OFFSET_MAP_U')
-
- # If we have a LinearInterp with just 2 control points, then
- # we can compute the time_scalar. Note that the SpeedRatio is
- # NOT correct in many AAFs - we aren't sure why, but luckily we
- # can compute the correct value this way.
- points = offset_map.get("PointList")
- if len(points) > 2:
- # This is something complicated... try the fancy version
- return _transcribe_fancy_timewarp(item, parameters)
- elif (
- len(points) == 2
- and float(points[0].time) == 0
- and float(points[0].value) == 0
- ):
- # With just two points, we can compute the slope
- effect.time_scalar = float(points[1].value) / float(points[1].time)
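- # e.g. (hypothetical control points) points[1].time == 100 and
- # points[1].value == 50 would give time_scalar = 50.0 / 100.0 == 0.5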
- else:
- # Fall back to the SpeedRatio if we didn't understand the points
- ratio = parameters.get("SpeedRatio")
- if ratio == str(item.length):
- # If the SpeedRatio == the length, this is a freeze frame
- effect.time_scalar = 0
- elif '/' in ratio:
- numerator, denominator = map(float, ratio.split('/'))
- # OTIO time_scalar is 1/x from AAF's SpeedRatio
- effect.time_scalar = denominator / numerator
- else:
- effect.time_scalar = 1.0 / float(ratio)
-
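- # Worked example (hypothetical values): a SpeedRatio of "24/48" takes
- # the '/' branch above and yields time_scalar = 48.0 / 24.0 == 2.0,
- # the reciprocal of the AAF ratio.
-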
- # Is this a freeze frame?
- if effect.time_scalar == 0:
- # Note: we might end up here if any of the code paths above
- # produced a 0 time_scalar.
- # Use the FreezeFrame class instead of LinearTimeWarp
- effect = otio.schema.FreezeFrame()
-
- return effect
-
-
-def _transcribe_fancy_timewarp(item, parameters):
-
- # For now, this is an unsupported time effect...
- effect = otio.schema.TimeEffect()
- effect.effect_name = ""
- effect.name = item.get("Name", "")
-
- return effect
-
- # TODO: Here is some sample code that pulls out the full
- # details of a non-linear speed map.
-
- # speed_map = item.parameter['PARAM_SPEED_MAP_U']
- # offset_map = item.parameter['PARAM_SPEED_OFFSET_MAP_U']
- # Also? PARAM_OFFSET_MAP_U (without the word "SPEED" in it?)
- # print(speed_map['PointList'].value)
- # print(speed_map.count())
- # print(speed_map.interpolation_def().name)
- #
- # for p in speed_map.points():
- # print(" ", float(p.time), float(p.value), p.edit_hint)
- # for prop in p.point_properties():
- # print(" ", prop.name, prop.value, float(prop.value))
- #
- # print(offset_map.interpolation_def().name)
- # for p in offset_map.points():
- # edit_hint = p.edit_hint
- # time = p.time
- # value = p.value
- #
- # pass
- # # print " ", float(p.time), float(p.value)
- #
- # for i in range(100):
- # float(offset_map.value_at("%i/100" % i))
- #
- # # Test file PARAM_SPEED_MAP_U is AvidBezierInterpolator
- # # value_at is not currently implemented for this interpolator
- # try:
- # speed_map.value_at(.25)
- # except NotImplementedError:
- # pass
- # else:
- # raise
-
-
-def _transcribe_operation_group(item, parents, metadata, edit_rate, indent):
- result = otio.schema.Stack()
-
- operation = metadata.get("Operation", {})
- parameters = metadata.get("Parameters", {})
- result.name = operation.get("Name")
-
- # Trust the length that is specified in the AAF
- length = metadata.get("Length")
- result.source_range = otio.opentime.TimeRange(
- otio.opentime.RationalTime(0, edit_rate),
- otio.opentime.RationalTime(length, edit_rate)
- )
-
- # Look for speed effects...
- effect = None
- if operation.get("IsTimeWarp"):
- if operation.get("Name") == "Motion Control":
-
- offset_map = _get_parameter(item, 'PARAM_SPEED_OFFSET_MAP_U')
- # TODO: We should also check the PARAM_OFFSET_MAP_U which has
- # an interpolation_def().name as well.
- if offset_map is not None:
- interpolation = offset_map.interpolation.name
- else:
- interpolation = None
-
- if interpolation == "LinearInterp":
- effect = _transcribe_linear_timewarp(item, parameters)
- else:
- effect = _transcribe_fancy_timewarp(item, parameters)
-
- else:
- # Unsupported time effect
- effect = otio.schema.TimeEffect()
- effect.effect_name = ""
- effect.name = operation.get("Name")
- else:
- # Unsupported effect
- effect = otio.schema.Effect()
- effect.effect_name = ""
- effect.name = operation.get("Name")
-
- if effect is not None:
- result.effects.append(effect)
-
- effect.metadata.clear()
- effect.metadata.update({
- "AAF": {
- "Operation": operation,
- "Parameters": parameters
- }
- })
-
- for segment in item.getvalue("InputSegments"):
- child = _transcribe(segment, parents + [item], edit_rate, indent)
- if child:
- _add_child(result, child, segment)
-
- return result
-
-
-def _fix_transitions(thing):
- if isinstance(thing, otio.schema.Timeline):
- _fix_transitions(thing.tracks)
- elif (
- isinstance(thing, otio.core.Composition)
- or isinstance(thing, otio.schema.SerializableCollection)
- ):
- if isinstance(thing, otio.schema.Track):
- for c, child in enumerate(thing):
-
- # Don't touch the Transitions themselves,
- # only the Clips & Gaps next to them.
- if not isinstance(child, otio.core.Item):
- continue
-
- # Was the item before us a Transition?
- if c > 0 and isinstance(
- thing[c - 1],
- otio.schema.Transition
- ):
- pre_trans = thing[c - 1]
-
- if child.source_range is None:
- child.source_range = child.trimmed_range()
- csr = child.source_range
- child.source_range = otio.opentime.TimeRange(
- start_time=csr.start_time + pre_trans.in_offset,
- duration=csr.duration - pre_trans.in_offset
- )
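- # For example (hypothetical numbers): a preceding transition with
- # in_offset == 6 frames moves this item's start_time 6 frames later
- # and shortens its duration by 6 frames.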
-
- # Is the item after us a Transition?
- if c < len(thing) - 1 and isinstance(
- thing[c + 1],
- otio.schema.Transition
- ):
- post_trans = thing[c + 1]
-
- if child.source_range is None:
- child.source_range = child.trimmed_range()
- csr = child.source_range
- child.source_range = otio.opentime.TimeRange(
- start_time=csr.start_time,
- duration=csr.duration - post_trans.out_offset
- )
-
- for child in thing:
- _fix_transitions(child)
-
-
-def _attach_markers(collection):
- """Search for markers on tracks and attach them to their corresponding item.
-
- Marked ranges will also be transformed into the new parent space.
-
- """
- # iterate all timeline objects
- for timeline in collection.find_children(descended_from_type=otio.schema.Timeline):
- tracks_map = {}
-
- # build track mapping
- for track in timeline.find_children(descended_from_type=otio.schema.Track):
- metadata = track.metadata.get("AAF", {})
- slot_id = metadata.get("SlotID")
- track_number = metadata.get("PhysicalTrackNumber")
- if slot_id is None or track_number is None:
- continue
-
- tracks_map[(int(slot_id), int(track_number))] = track
-
- # iterate all tracks for their markers and attach them to the matching item
- for current_track in timeline.find_children(
- descended_from_type=otio.schema.Track):
- for marker in list(current_track.markers):
- metadata = marker.metadata.get("AAF", {})
- slot_id = metadata.get("AttachedSlotID")
- track_number = metadata.get("AttachedPhysicalTrackNumber")
- target_track = tracks_map.get((slot_id, track_number))
- if target_track is None:
- raise AAFAdapterError(
- "Marker '{}' cannot be attached to an item. SlotID: '{}', "
- "PhysicalTrackNumber: '{}'".format(
- marker.name, slot_id, track_number
- )
- )
-
- # remove marker from current parent track
- current_track.markers.remove(marker)
-
- # determine new item to attach the marker to
- try:
- target_item = target_track.child_at_time(
- marker.marked_range.start_time
- )
-
- if target_item is None or not hasattr(target_item, 'markers'):
- # Item found cannot have markers, for example Transition.
- # See also `marker-over-transition.aaf` in test data.
- #
- # Leave markers on the track for now.
- _transcribe_log(
- 'Skipping target_item `{}`: it cannot have markers'.format(
- target_item,
- ),
- )
- target_item = target_track
-
- # transform marked range into new item range
- marked_start_local = current_track.transformed_time(
- marker.marked_range.start_time, target_item
- )
-
- marker.marked_range = otio.opentime.TimeRange(
- start_time=marked_start_local,
- duration=marker.marked_range.duration
- )
-
- except otio.exceptions.CannotComputeAvailableRangeError as e:
- # For audio media AAF file (marker-over-audio.aaf),
- # this exception would be triggered in:
- # `target_item = target_track.child_at_time()` with error
- # message:
- # "No available_range set on media reference on clip".
- #
- # Leave markers on the track for now.
- _transcribe_log(
- 'Cannot compute availableRange from {} to {}: {}'.format(
- marker,
- target_track,
- e,
- ),
- )
- target_item = target_track
-
- # attach marker to target item
- target_item.markers.append(marker)
-
- _transcribe_log(
- "Marker: '{}' (time: {}), attached to item: '{}'".format(
- marker.name,
- marker.marked_range.start_time.value,
- target_item.name,
- )
- )
-
- return collection
-
-
-def _simplify(thing):
- # If the passed-in thing is an empty dictionary or None, there is nothing
- # to do. Without this check we would still return thing, but this way we
- # avoid unnecessary if-chain comparisons.
- if not thing:
- return thing
-
- if isinstance(thing, otio.schema.SerializableCollection):
- if len(thing) == 1:
- return _simplify(thing[0])
- else:
- for c, child in enumerate(thing):
- thing[c] = _simplify(child)
- return thing
-
- elif isinstance(thing, otio.schema.Timeline):
- result = _simplify(thing.tracks)
-
- # Only replace the Timeline's stack if the simplified result
- # was also a Stack. Otherwise leave it (the contents will have
- # been simplified in place).
- if isinstance(result, otio.schema.Stack):
- thing.tracks = result
-
- return thing
-
- elif isinstance(thing, otio.core.Composition):
- # simplify our children
- for c, child in enumerate(thing):
- thing[c] = _simplify(child)
-
- # remove empty children of Stacks
- if isinstance(thing, otio.schema.Stack):
- for c in reversed(range(len(thing))):
- child = thing[c]
- if not _contains_something_valuable(child):
- # TODO: We're discarding metadata... should we retain it?
- del thing[c]
-
- # Look for Stacks within Stacks
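- # e.g. a Stack nested directly inside this Stack, or one reached through
- # a single-child Track as in Stack[Track[Stack[Track, ...]]], has its
- # children spliced up into this Stack in place of the child itself.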
- c = len(thing) - 1
- while c >= 0:
- child = thing[c]
- # Is my child a Stack also? (with no effects)
- if (
- not _has_effects(child)
- and
- (
- isinstance(child, otio.schema.Stack)
- or (
- isinstance(child, otio.schema.Track)
- and len(child) == 1
- and isinstance(child[0], otio.schema.Stack)
- and child[0]
- and isinstance(child[0][0], otio.schema.Track)
- )
- )
- ):
- if isinstance(child, otio.schema.Track):
- child = child[0]
-
- # Pull the child's children into the parent
- num = len(child)
- children_of_child = child[:]
- # clear out the ownership of 'child'
- del child[:]
- thing[c:c + 1] = children_of_child
-
- # TODO: We may be discarding metadata, should we merge it?
- # TODO: Do we need to offset the markers in time?
- thing.markers.extend(child.markers)
- # Note: we don't merge effects, because we already made
- # sure the child had no effects in the if statement above.
-
- # Preserve the enabled/disabled state as we merge these two.
- thing.enabled = thing.enabled and child.enabled
-
- c = c + num
- c = c - 1
-
- # skip redundant containers
- if _is_redundant_container(thing):
- # TODO: We may be discarding metadata here, should we merge it?
- result = thing[0].deepcopy()
-
- # Since this process reduces the complexity of the object structure,
- # we need to make sure that any enabled/disabled statuses are
- # respected and applied appropriately.
- if not thing.enabled:
- result.enabled = False
-
- # TODO: Do we need to offset the markers in time?
- result.markers.extend(thing.markers)
-
- # TODO: The order of the effects is probably important...
- # should they be added to the end or the front?
- # Intuitively it seems like the child's effects should come before
- # the parent's effects. This will need to be solidified when we
- # add more effects support.
- result.effects.extend(thing.effects)
- # Keep the parent's length, if it has one
- if thing.source_range:
- # make sure it has a source_range first
- if not result.source_range:
- try:
- result.source_range = result.trimmed_range()
- except otio.exceptions.CannotComputeAvailableRangeError:
- result.source_range = copy.copy(thing.source_range)
- # modify the duration, but leave the start_time as is
- result.source_range = otio.opentime.TimeRange(
- result.source_range.start_time,
- thing.source_range.duration
- )
- return result
-
- # if thing is the top level stack, all of its children must be in tracks
- if isinstance(thing, otio.schema.Stack) and thing.parent() is None:
- children_needing_tracks = []
- for child in thing:
- if isinstance(child, otio.schema.Track):
- continue
- children_needing_tracks.append(child)
-
- for child in children_needing_tracks:
- orig_index = thing.index(child)
- del thing[orig_index]
- new_track = otio.schema.Track()
- new_track.append(child)
- thing.insert(orig_index, new_track)
-
- return thing
-
-
-def _has_effects(thing):
- if isinstance(thing, otio.core.Item):
- if len(thing.effects) > 0:
- return True
-
-
-def _is_redundant_container(thing):
-
- is_composition = isinstance(thing, otio.core.Composition)
- if not is_composition:
- return False
-
- has_one_child = len(thing) == 1
- if not has_one_child:
- return False
-
- am_top_level_track = (
- type(thing) is otio.schema.Track
- and type(thing.parent()) is otio.schema.Stack
- and thing.parent().parent() is None
- )
-
- return (
- not am_top_level_track
- # am a top level track but my only child is a track
- or (
- type(thing) is otio.schema.Track
- and type(thing[0]) is otio.schema.Track
- )
- )
-
-
-def _contains_something_valuable(thing):
- if isinstance(thing, otio.core.Item):
- if len(thing.effects) > 0 or len(thing.markers) > 0:
- return True
-
- if isinstance(thing, otio.core.Composition):
-
- if len(thing) == 0:
- # NOT valuable because it is empty
- return False
-
- for child in thing:
- if _contains_something_valuable(child):
- # valuable because this child is valuable
- return True
-
- # none of the children were valuable, so thing is NOT valuable
- return False
-
- if isinstance(thing, otio.schema.Gap):
- # TODO: Are there other valuable things we should look for on a Gap?
- return False
-
- # anything else is presumed to be valuable
- return True
-
-
-def _get_mobs_for_transcription(storage):
- """
- When we transcribe an AAF into OTIO space, we apply the following heuristic:
-
- 1) First look for top level mobs and if found use that to transcribe.
-
- 2) If we don't have top level mobs, look for composition mobs and use them to
- transcribe.
-
- 3) Lastly if we don't have either, try to use master mobs to transcribe.
-
- If we don't find any Mobs, just tell the user and run transcription on an
- empty list (to generate some 'empty-level' OTIO structure).
-
- This heuristic is based on 'real-world' examples. There may still be some
- corner cases / open questions (like could there be metadata on both
- a composition mob and master mob? And if so, who would 'win'?)
-
- In any case, this heuristic satisfies the current set of AAFs we are using
- in our test environment.
-
- """
-
- top_level_mobs = list(storage.toplevel())
-
- if len(top_level_mobs) > 0:
- _transcribe_log("---\nTranscribing top level mobs\n---")
- return top_level_mobs
-
- composition_mobs = list(storage.compositionmobs())
- if len(composition_mobs) > 0:
- _transcribe_log("---\nTranscribing composition mobs\n---")
- return composition_mobs
-
- master_mobs = list(storage.mastermobs())
- if len(master_mobs) > 0:
- _transcribe_log("---\nTranscribing master mobs\n---")
- return master_mobs
-
- _transcribe_log("---\nNo mobs found to transcribe\n---")
-
- return []
-
-
-def read_from_file(
- filepath,
- simplify=True,
- transcribe_log=False,
- attach_markers=True,
- bake_keyframed_properties=False
-):
- """Reads AAF content from `filepath` and outputs an OTIO timeline object.
-
- Args:
- filepath (str): AAF filepath
- simplify (bool, optional): simplify timeline structure by stripping empty items
- transcribe_log (bool, optional): log activity as items are getting transcribed
- attach_markers (bool, optional): attaches markers to their appropriate items
- like clip, gap, etc. on the track
- bake_keyframed_properties (bool, optional): bakes animated property values
- for each frame in a source clip
- Returns:
- otio.schema.Timeline
-
- """
- # 'activate' transcribe logging if adapter argument is provided.
- # Note that a global 'switch' is used in order to avoid
- # passing another argument around in the _transcribe() method.
- #
- global _TRANSCRIBE_DEBUG, _BAKE_KEYFRAMED_PROPERTIES_VALUES
- _TRANSCRIBE_DEBUG = transcribe_log
- _BAKE_KEYFRAMED_PROPERTIES_VALUES = bake_keyframed_properties
-
- with aaf2.open(filepath) as aaf_file:
- # Note: We're skipping: aaf_file.header
- # Is there something valuable in there?
-
- storage = aaf_file.content
- mobs_to_transcribe = _get_mobs_for_transcription(storage)
-
- result = _transcribe(mobs_to_transcribe, parents=list(), edit_rate=None)
-
- # Attach marker to the appropriate clip, gap etc.
- if attach_markers:
- result = _attach_markers(result)
-
- # AAF is typically more deeply nested than OTIO.
- # Let's try to simplify the structure by collapsing or removing
- # unnecessary stuff.
- if simplify:
- result = _simplify(result)
-
- # OTIO represents transitions a bit differently than AAF, so
- # we need to iterate over them and modify the items on either side.
- # Note that we do this *after* simplifying, since the structure
- # may change during simplification.
- _fix_transitions(result)
-
- # Reset transcribe_log debugging
- _TRANSCRIBE_DEBUG = False
-
- return result
-
-
-def write_to_file(input_otio, filepath, **kwargs):
-
- with aaf2.open(filepath, "w") as f:
-
- timeline = aaf_writer._stackify_nested_groups(input_otio)
-
- aaf_writer.validate_metadata(timeline)
-
- otio2aaf = aaf_writer.AAFFileTranscriber(timeline, f, **kwargs)
-
- if not isinstance(timeline, otio.schema.Timeline):
- raise otio.exceptions.NotSupportedError(
- "Currently only supporting top level Timeline")
-
- for otio_track in timeline.tracks:
- # Skip empty tracks; we need at least one child to derive the edit_rate
- if len(otio_track) == 0:
- continue
-
- transcriber = otio2aaf.track_transcriber(otio_track)
-
- for otio_child in otio_track:
- result = transcriber.transcribe(otio_child)
- if result:
- transcriber.sequence.components.append(result)
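-
-
-# A minimal round-trip sketch (hypothetical paths), assuming this adapter is
-# registered with OTIO for both reading and writing:
-#
-#   timeline = otio.adapters.read_from_file("/path/to/edit.aaf")
-#   otio.adapters.write_to_file(timeline, "/path/to/copy.aaf")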
diff --git a/contrib/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json b/contrib/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json
index 31fc45c9e9..0e79658aaa 100644
--- a/contrib/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json
+++ b/contrib/opentimelineio_contrib/adapters/contrib_adapters.plugin_manifest.json
@@ -31,12 +31,6 @@
"filepath" : "burnins.py",
"suffixes" : []
},
- {
- "OTIO_SCHEMA" : "Adapter.1",
- "name" : "AAF",
- "filepath" : "advanced_authoring_format.py",
- "suffixes" : ["aaf"]
- },
{
"OTIO_SCHEMA": "Adapter.1",
"name": "xges",
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/2997fps-DFTC.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/2997fps-DFTC.aaf
deleted file mode 100644
index f0c206cf33..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/2997fps-DFTC.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/2997fps.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/2997fps.aaf
deleted file mode 100644
index 9add76b0c2..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/2997fps.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/30fps.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/30fps.aaf
deleted file mode 100644
index 10d06dfbcf..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/30fps.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/composite.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/composite.aaf
deleted file mode 100755
index c24f00b3a6..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/composite.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/duplicates.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/duplicates.aaf
deleted file mode 100644
index 2f75615405..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/duplicates.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/essence_group.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/essence_group.aaf
deleted file mode 100644
index 7802e22011..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/essence_group.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/keyframed_properties.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/keyframed_properties.aaf
deleted file mode 100755
index ca7551bbb9..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/keyframed_properties.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/linear_speed_effects.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/linear_speed_effects.aaf
deleted file mode 100644
index 2e9b4affd3..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/linear_speed_effects.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/linear_speed_effects_aaf.mov b/contrib/opentimelineio_contrib/adapters/tests/sample_data/linear_speed_effects_aaf.mov
deleted file mode 100644
index ad1c5c7a11..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/linear_speed_effects_aaf.mov and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/marker-over-audio.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/marker-over-audio.aaf
deleted file mode 100644
index d0d335344c..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/marker-over-audio.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/marker-over-transition.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/marker-over-transition.aaf
deleted file mode 100644
index bf9174d4ec..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/marker-over-transition.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/misc_speed_effects.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/misc_speed_effects.aaf
deleted file mode 100644
index a39847d3a6..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/misc_speed_effects.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/misc_speed_effects_aaf.mov b/contrib/opentimelineio_contrib/adapters/tests/sample_data/misc_speed_effects_aaf.mov
deleted file mode 100644
index 27a16e060c..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/misc_speed_effects_aaf.mov and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/multiple_markers.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/multiple_markers.aaf
deleted file mode 100755
index 3855ed7f5e..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/multiple_markers.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/multiple_timecode_objects.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/multiple_timecode_objects.aaf
deleted file mode 100644
index f614c016b1..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/multiple_timecode_objects.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/multiple_top_level_mobs.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/multiple_top_level_mobs.aaf
deleted file mode 100644
index a2cf2b94fe..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/multiple_top_level_mobs.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/multitrack.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/multitrack.aaf
deleted file mode 100644
index 4903865d61..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/multitrack.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/nested_stack.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/nested_stack.aaf
deleted file mode 100755
index 76f317d487..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/nested_stack.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/nesting_test.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/nesting_test.aaf
deleted file mode 100644
index 2dfe2d0099..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/nesting_test.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/nesting_test_preflattened.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/nesting_test_preflattened.aaf
deleted file mode 100644
index 1ff9b57049..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/nesting_test_preflattened.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/no_metadata.otio b/contrib/opentimelineio_contrib/adapters/tests/sample_data/no_metadata.otio
deleted file mode 100644
index 279d97da4a..0000000000
--- a/contrib/opentimelineio_contrib/adapters/tests/sample_data/no_metadata.otio
+++ /dev/null
@@ -1,151 +0,0 @@
-{
- "OTIO_SCHEMA": "Timeline.1",
- "metadata": {},
- "name": "OTIO_Test_ppjoshm1.Exported.01",
- "tracks": {
- "OTIO_SCHEMA": "Stack.1",
- "children": [
- {
- "OTIO_SCHEMA": "Track.1",
- "children": [
- {
- "OTIO_SCHEMA": "Clip.1",
- "effects": [],
- "markers": [],
- "media_reference": {
- "OTIO_SCHEMA": "ExternalReference.1",
- "available_range": {
- "OTIO_SCHEMA": "TimeRange.1",
- "duration": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24,
- "value": 192
- },
- "start_time": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24,
- "value": 1
- }
- },
- "metadata": {},
- "name": null,
- "target_url": "sample_data/one_clip.aaf"
- },
- "metadata": {
- "example_studio": {
- "OTIO_SCHEMA": "ExampleStudioMetadata.1",
- "cache": {
- "hitech": {
- "OTIO_SCHEMA": "ExampleDatabase.1",
- "shot": null,
- "take": null
- }
- },
- "take": {
- "OTIO_SCHEMA": "ExampleStudioTake.1",
- "globaltake": 1,
- "prod": "ppjoshm",
- "shot": "ppjoshm_1",
- "unit": "none"
- }
- }
- },
- "name": "ppjoshm_1 (SIM1)",
- "source_range": {
- "OTIO_SCHEMA": "TimeRange.1",
- "duration": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 10
- },
- "start_time": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 101
- }
- }
- }
- ],
- "effects": [],
- "kind": "Video",
- "markers": [],
- "metadata": {},
- "name": "TimelineMobSlot",
- "source_range": null
- },
- {
- "OTIO_SCHEMA": "Track.1",
- "children": [
- {
- "OTIO_SCHEMA": "Clip.1",
- "effects": [],
- "markers": [],
- "media_reference": {
- "OTIO_SCHEMA": "ExternalReference.1",
- "available_range": {
- "OTIO_SCHEMA": "TimeRange.1",
- "duration": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24,
- "value": 192
- },
- "start_time": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24,
- "value": 1
- }
- },
- "metadata": {},
- "name": null,
- "target_url": "sample_data/one_clip.aaf"
- },
- "metadata": {
- "example_studio": {
- "OTIO_SCHEMA": "ExampleStudioMetadata.1",
- "cache": {
- "hitech": {
- "OTIO_SCHEMA": "ExampleDatabase.1",
- "shot": null,
- "take": null
- }
- },
- "take": {
- "OTIO_SCHEMA": "ExampleStudioTake.1",
- "globaltake": 1,
- "prod": "ppjoshm",
- "shot": "ppjoshm_1",
- "unit": "none"
- }
- }
- },
- "name": "ppjoshm_1 (SIM1)",
- "source_range": {
- "OTIO_SCHEMA": "TimeRange.1",
- "duration": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 10
- },
- "start_time": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 0
- }
- }
- }
- ],
- "effects": [],
- "kind": "Audio",
- "markers": [],
- "metadata": {},
- "name": "TimelineMobSlot",
- "source_range": null
- }
- ],
- "effects": [],
- "markers": [],
- "metadata": {},
- "name": "tracks",
- "source_range": null
- }
-}
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/normalclip_sourceclip_references_compositionmob_has_also_mastermob_usercomments.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/normalclip_sourceclip_references_compositionmob_has_also_mastermob_usercomments.aaf
deleted file mode 100644
index 18271a7dc9..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/normalclip_sourceclip_references_compositionmob_has_also_mastermob_usercomments.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/normalclip_sourceclip_references_compositionmob_with_usercomments_no_mastermob_usercomments.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/normalclip_sourceclip_references_compositionmob_with_usercomments_no_mastermob_usercomments.aaf
deleted file mode 100644
index 0f03ce8893..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/normalclip_sourceclip_references_compositionmob_with_usercomments_no_mastermob_usercomments.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/not_aaf.otio b/contrib/opentimelineio_contrib/adapters/tests/sample_data/not_aaf.otio
deleted file mode 100644
index 36664d20e7..0000000000
--- a/contrib/opentimelineio_contrib/adapters/tests/sample_data/not_aaf.otio
+++ /dev/null
@@ -1,151 +0,0 @@
-{
- "OTIO_SCHEMA": "Timeline.1",
- "metadata": {},
- "name": "OTIO_Test_ppjoshm1.Exported.01",
- "tracks": {
- "OTIO_SCHEMA": "Stack.1",
- "children": [
- {
- "OTIO_SCHEMA": "Track.1",
- "children": [
- {
- "OTIO_SCHEMA": "Clip.1",
- "effects": [],
- "markers": [],
- "media_reference": {
- "OTIO_SCHEMA": "ExternalReference.1",
- "available_range": {
- "OTIO_SCHEMA": "TimeRange.1",
- "duration": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24,
- "value": 192
- },
- "start_time": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24,
- "value": 1
- }
- },
- "metadata": {},
- "name": null,
- "target_url": "sample_data/one_clip.mov"
- },
- "metadata": {
- "example_studio": {
- "OTIO_SCHEMA": "ExampleStudioMetadata.1",
- "cache": {
- "hitech": {
- "OTIO_SCHEMA": "ExampleDatabase.1",
- "shot": null,
- "take": null
- }
- },
- "take": {
- "OTIO_SCHEMA": "ExampleStudioTake.1",
- "globaltake": 1,
- "prod": "ppjoshm",
- "shot": "ppjoshm_1",
- "unit": "none"
- }
- }
- },
- "name": "ppjoshm_1 (SIM1)",
- "source_range": {
- "OTIO_SCHEMA": "TimeRange.1",
- "duration": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 10
- },
- "start_time": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 101
- }
- }
- }
- ],
- "effects": [],
- "kind": "Video",
- "markers": [],
- "metadata": {},
- "name": "TimelineMobSlot",
- "source_range": null
- },
- {
- "OTIO_SCHEMA": "Track.1",
- "children": [
- {
- "OTIO_SCHEMA": "Clip.1",
- "effects": [],
- "markers": [],
- "media_reference": {
- "OTIO_SCHEMA": "ExternalReference.1",
- "available_range": {
- "OTIO_SCHEMA": "TimeRange.1",
- "duration": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24,
- "value": 192
- },
- "start_time": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24,
- "value": 1
- }
- },
- "metadata": {},
- "name": null,
- "target_url": "sample_data/one_clip.mov"
- },
- "metadata": {
- "example_studio": {
- "OTIO_SCHEMA": "ExampleStudioMetadata.1",
- "cache": {
- "hitech": {
- "OTIO_SCHEMA": "ExampleDatabase.1",
- "shot": null,
- "take": null
- }
- },
- "take": {
- "OTIO_SCHEMA": "ExampleStudioTake.1",
- "globaltake": 1,
- "prod": "ppjoshm",
- "shot": "ppjoshm_1",
- "unit": "none"
- }
- }
- },
- "name": "ppjoshm_1 (SIM1)",
- "source_range": {
- "OTIO_SCHEMA": "TimeRange.1",
- "duration": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 10
- },
- "start_time": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 0
- }
- }
- }
- ],
- "effects": [],
- "kind": "Audio",
- "markers": [],
- "metadata": {},
- "name": "TimelineMobSlot",
- "source_range": null
- }
- ],
- "effects": [],
- "markers": [],
- "metadata": {},
- "name": "tracks",
- "source_range": null
- }
-}
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/one_audio_clip.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/one_audio_clip.aaf
deleted file mode 100755
index 35de338922..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/one_audio_clip.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/one_clip.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/one_clip.aaf
deleted file mode 100644
index edcbf4d658..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/one_clip.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/precheckfail.otio b/contrib/opentimelineio_contrib/adapters/tests/sample_data/precheckfail.otio
deleted file mode 100644
index 1f895f30c2..0000000000
--- a/contrib/opentimelineio_contrib/adapters/tests/sample_data/precheckfail.otio
+++ /dev/null
@@ -1,234 +0,0 @@
-{
- "OTIO_SCHEMA": "Timeline.1",
- "global_start_time": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 86400
- },
- "metadata": {
- "AAF": {
- "ClassName": "CompositionMob",
- "CreationTime": "2019-03-29 18:55:55",
- "LastModified": "2019-03-29 18:55:14",
- "MobAttributeList": {
- "AudioPluginWindowTrack": 1,
- "PRJ_BOUNDARY_FRAMES": 1,
- "SEQUERNCE_FORMAT_STRING": "HD 1080p/24",
- "SEQUERNCE_FORMAT_TYPE": 10,
- "_IMAGE_BOUNDS_OVERRIDE": " ",
- "_USER_POS": 10,
- "_VERSION": 2
- },
- "MobID": "urn:smpte:umid:060a2b34.01010101.01010f00.13000000.060e2b34.7f7f2a80.5c9e6a3b.ace913a2",
- "Name": "OTIO_Test_ppjoshm1.Exported.01",
- "Slots": {},
- "UsageCode": "Usage_TopLevel"
- }
- },
- "name": "OTIO_Test_ppjoshm1.Exported.01",
- "tracks": {
- "OTIO_SCHEMA": "Stack.1",
- "children": [
- {
- "OTIO_SCHEMA": "Track.1",
- "children": [
- {
- "OTIO_SCHEMA": "Clip.1",
- "effects": [],
- "markers": [],
- "media_reference": {
- "OTIO_SCHEMA": "MissingReference.1",
- "available_range": null,
- "metadata": {
- "AAF": {
- "ClassName": "MasterMob",
- "ConvertFrameRate": false,
- "CreationTime": "2019-03-29 18:52:18",
- "LastModified": "2019-03-29 18:54:01",
- "MobAttributeList": {
- "_GEN": 1553885640,
- "_IMPORTSETTING": "__AttributeList",
- "_SAVED_AAF_AUDIO_LENGTH": 0,
- "_SAVED_AAF_AUDIO_RATE_DEN": 1,
- "_SAVED_AAF_AUDIO_RATE_NUM": 24,
- "_USER_POS": 0,
- "_VERSION": 2
- },
- "MobID": "urn:smpte:umid:060a2b34.01010101.01010f00.13000000.060e2b34.7f7f2a80.5c9e6962.cd005cc5",
- "Name": "ppjoshm_1 (SIM1)",
- "Slots": {}
- }
- },
- "name": null
- },
- "metadata": {
- "AAF": {
- "ClassName": "SourceClip",
- "ComponentAttributeList": {
- "_IMAGE_BOUNDS_OVERRIDE": " -800/1 -450/1 1600/1 900/1 -800/1 -450/1 1600/1 900/1 -800/1 -450/1 1600/1 900/1 "
- },
- "DataDefinition": {
- "Description": "Picture Essence",
- "Identification": "01030202-0100-0000-060e-2b3404010101",
- "Name": "Picture"
- },
- "Length": 10,
- "Name": "ppjoshm_1 (SIM1)",
- "SourceID": "urn:smpte:umid:060a2b34.01010101.01010f00.13000000.060e2b34.7f7f2a80.5c9e6962.cd005cc5",
- "SourceMobSlotID": 1,
- "StartTime": 0
- }
- },
- "name": "ppjoshm_1 (SIM1)",
- "source_range": {
- "OTIO_SCHEMA": "TimeRange.1",
- "duration": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 10
- },
- "start_time": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 86501
- }
- }
- }
- ],
- "effects": [],
- "kind": "Video",
- "markers": [],
- "metadata": {
- "AAF": {
- "ClassName": "TimelineMobSlot",
- "EditRate": "24",
- "MediaKind": "Picture",
- "Name": "TimelineMobSlot",
- "Origin": 0,
- "PhysicalTrackNumber": 1,
- "Segment": {
- "Components": {},
- "DataDefinition": {
- "Description": "Picture Essence",
- "Identification": "01030202-0100-0000-060e-2b3404010101",
- "Name": "Picture"
- },
- "Length": 10
- },
- "SlotID": 9,
- "SlotName": ""
- }
- },
- "name": "TimelineMobSlot",
- "source_range": null
- },
- {
- "OTIO_SCHEMA": "Track.1",
- "children": [
- {
- "OTIO_SCHEMA": "Clip.1",
- "effects": [],
- "markers": [],
- "media_reference": {
- "OTIO_SCHEMA": "MissingReference.1",
- "available_range": {
- "OTIO_SCHEMA": "TimeRange.1",
- "duration": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 48.0,
- "value": 10
- },
- "start_time": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 48.0,
- "value": 0
- }
- },
- "metadata": {
- "AAF": {
- "ClassName": "MasterMob",
- "ConvertFrameRate": false,
- "CreationTime": "2019-03-29 18:52:18",
- "LastModified": "2019-03-29 18:54:01",
- "MobAttributeList": {
- "_GEN": 1553885640,
- "_IMPORTSETTING": "__AttributeList",
- "_SAVED_AAF_AUDIO_LENGTH": 0,
- "_SAVED_AAF_AUDIO_RATE_DEN": 1,
- "_SAVED_AAF_AUDIO_RATE_NUM": 24,
- "_USER_POS": 0,
- "_VERSION": 2
- },
- "MobID": "urn:smpte:umid:060a2b34.01010101.01010f00.13000000.060e2b34.7f7f2a80.5c9e6962.cd005cc5",
- "Name": "ppjoshm_1 (SIM1)",
- "Slots": {}
- }
- },
- "name": null
- },
- "metadata": {
- "AAF": {
- "ClassName": "SourceClip",
- "DataDefinition": {
- "Description": "Sound Essence",
- "Identification": "01030202-0200-0000-060e-2b3404010101",
- "Name": "Sound"
- },
- "Length": 10,
- "Name": "ppjoshm_1 (SIM1)",
- "SourceID": "urn:smpte:umid:060a2b34.01010101.01010f00.13000000.060e2b34.7f7f2a80.5c9e6962.cd005cc5",
- "SourceMobSlotID": 2,
- "StartTime": 0
- }
- },
- "name": "ppjoshm_1 (SIM1)",
- "source_range": {
- "OTIO_SCHEMA": "TimeRange.1",
- "duration": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 10
- },
- "start_time": {
- "OTIO_SCHEMA": "RationalTime.1",
- "rate": 24.0,
- "value": 0
- }
- }
- }
- ],
- "effects": [],
- "kind": "Audio",
- "markers": [],
- "metadata": {
- "AAF": {
- "ClassName": "TimelineMobSlot",
- "EditRate": "24",
- "MediaKind": "Sound",
- "Name": "TimelineMobSlot",
- "Origin": 0,
- "PhysicalTrackNumber": 1,
- "Segment": {
- "Components": {},
- "DataDefinition": {
- "Description": "Sound Essence",
- "Identification": "01030202-0200-0000-060e-2b3404010101",
- "Name": "Sound"
- },
- "Length": 10
- },
- "SlotID": 10,
- "SlotName": ""
- }
- },
- "name": "TimelineMobSlot",
- "source_range": null
- }
- ],
- "effects": [],
- "markers": [],
- "metadata": {},
- "name": "tracks",
- "source_range": null
- }
-}
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/preflattened.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/preflattened.aaf
deleted file mode 100644
index 618cf8aa0a..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/preflattened.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/simple.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/simple.aaf
deleted file mode 100644
index 81a09225c9..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/simple.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/subclip_sourceclip_references_compositionmob_with_mastermob.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/subclip_sourceclip_references_compositionmob_with_mastermob.aaf
deleted file mode 100644
index 03178e8126..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/subclip_sourceclip_references_compositionmob_with_mastermob.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/test_muted_clip.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/test_muted_clip.aaf
deleted file mode 100644
index fa8efebf1f..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/test_muted_clip.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/timecode_test.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/timecode_test.aaf
deleted file mode 100644
index 3defe9a124..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/timecode_test.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/transitions.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/transitions.aaf
deleted file mode 100644
index 0b56704f06..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/transitions.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/trims.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/trims.aaf
deleted file mode 100644
index e4953dafdf..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/trims.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/sample_data/utf8.aaf b/contrib/opentimelineio_contrib/adapters/tests/sample_data/utf8.aaf
deleted file mode 100644
index ab36e7f61a..0000000000
Binary files a/contrib/opentimelineio_contrib/adapters/tests/sample_data/utf8.aaf and /dev/null differ
diff --git a/contrib/opentimelineio_contrib/adapters/tests/test_aaf_adapter.py b/contrib/opentimelineio_contrib/adapters/tests/test_aaf_adapter.py
deleted file mode 100644
index 88ec22c52f..0000000000
--- a/contrib/opentimelineio_contrib/adapters/tests/test_aaf_adapter.py
+++ /dev/null
@@ -1,1942 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright Contributors to the OpenTimelineIO project
-
-"""Test the AAF adapter."""
-
-# python
-import os
-import sys
-import unittest
-import tempfile
-
-import opentimelineio as otio
-from opentimelineio_contrib.adapters.aaf_adapter.aaf_writer import (
- AAFAdapterError,
- AAFValidationError
-)
-
-import io
-
-
-TRANSCRIPTION_RESULT = """---
-Transcribing top level mobs
----
-Creating SerializableCollection for Iterable for list
- Creating Timeline for SubclipTSVNoData_NoVideo.Exported.02
- Creating Track for TimelineMobSlot for TimelineMobSlot
- Creating Track for TimelineMobSlot for TimelineMobSlot
- Creating Track for TimelineMobSlot for TimelineMobSlot
- Creating Track for TimelineMobSlot for TimelineMobSlot
- Creating Track for TimelineMobSlot for TimelineMobSlot
- Creating Track for TimelineMobSlot for TimelineMobSlot
- Creating Track for TimelineMobSlot for TimelineMobSlot
- Creating Track for TimelineMobSlot for TimelineMobSlot
- Creating Track for TimelineMobSlot for DX
- Creating Track for Sequence for Sequence
- Creating operationGroup for OperationGroup
- Creating SourceClip for Subclip.BREATH (Usage_SubClip)
- [found child_mastermob]
- Creating Timeline for subclip
- Creating Track for TimelineMobSlot for TimelineMobSlot
- Creating SourceClip for x000-0000_01_Xxxxx_Xxx.aaf
- [found no mastermob]
- Creating Track for MobSlot for EventMobSlot
- Creating Track for Sequence for Sequence
- Create marker for DescriptiveMarker
- Creating Track for MobSlot for EventMobSlot
- Creating Track for Sequence for Sequence
- Create marker for DescriptiveMarker
- Creating Track for TimelineMobSlot for TimelineMobSlot
- Creating Track for Sequence for Sequence
- Creating Gap for Filler
- Creating Track for TimelineMobSlot for TimelineMobSlot
-Marker: NEED PDX (time: 360567.0), attached to item: Subclip.BREATH
-"""
-
-
-SAMPLE_DATA_DIR = os.path.join(os.path.dirname(__file__), "sample_data")
-SIMPLE_EXAMPLE_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "simple.aaf"
-)
-TRANSITIONS_EXAMPLE_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "transitions.aaf"
-)
-TRIMS_EXAMPLE_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "trims.aaf"
-)
-MULTITRACK_EXAMPLE_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "multitrack.aaf"
-)
-PREFLATTENED_EXAMPLE_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "preflattened.aaf"
-)
-NESTING_EXAMPLE_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "nesting_test.aaf"
-)
-NESTED_STACK_EXAMPLE_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "nested_stack.aaf"
-)
-NESTING_PREFLATTENED_EXAMPLE_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "nesting_test_preflattened.aaf"
-)
-MISC_SPEED_EFFECTS_EXAMPLE_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "misc_speed_effects.aaf"
-)
-PRECHECK_FAIL_OTIO = os.path.join(
- SAMPLE_DATA_DIR,
- "precheckfail.otio"
-)
-LINEAR_SPEED_EFFECTS_EXAMPLE_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "linear_speed_effects.aaf"
-)
-TIMCODE_EXAMPLE_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "timecode_test.aaf"
-)
-MUTED_CLIP_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "test_muted_clip.aaf"
-)
-ESSENCE_GROUP_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "essence_group.aaf"
-)
-ONE_AUDIO_CLIP_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "one_audio_clip.aaf"
-)
-FPS30_CLIP_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "30fps.aaf"
-)
-FPS2997_CLIP_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "2997fps.aaf"
-)
-FPS2997_DFTC_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "2997fps-DFTC.aaf"
-)
-DUPLICATES_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "duplicates.aaf"
-)
-NO_METADATA_OTIO_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "no_metadata.otio"
-)
-NOT_AAF_OTIO_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "not_aaf.otio"
-)
-UTF8_CLIP_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "utf8.aaf"
-)
-MULTIPLE_TOP_LEVEL_MOBS_CLIP_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "multiple_top_level_mobs.aaf"
-)
-GAPS_OTIO_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "gaps.otio"
-)
-COMPOSITE_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "composite.aaf"
-)
-
-SUBCLIP_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "subclip_sourceclip_references_compositionmob_with_mastermob.aaf"
-)
-
-COMPOSITION_METADATA_MASTERMOB_METADATA_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "normalclip_sourceclip_references_compositionmob_"
- "has_also_mastermob_usercomments.aaf"
-)
-
-COMPOSITION_METADATA_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "normalclip_sourceclip_references_compositionmob_"
- "with_usercomments_no_mastermob_usercomments.aaf"
-)
-
-MULTIPLE_TIMECODE_OBJECTS_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "multiple_timecode_objects.aaf"
-)
-
-MULTIPLE_MARKERS_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "multiple_markers.aaf"
-)
-
-KEYFRAMED_PROPERTIES_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "keyframed_properties.aaf"
-)
-
-MARKER_OVER_TRANSITION_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "marker-over-transition.aaf",
-)
-
-MARKER_OVER_AUDIO_PATH = os.path.join(
- SAMPLE_DATA_DIR,
- "marker-over-audio.aaf"
-)
-
-
-try:
- lib_path = os.environ.get("OTIO_AAF_PYTHON_LIB")
- if lib_path and lib_path not in sys.path:
- sys.path.insert(0, lib_path)
- import aaf2 # noqa
- from aaf2.components import (SourceClip,
- Filler,
- Transition,
- Timecode,
- OperationGroup,
- Sequence)
- from aaf2.mobs import MasterMob, SourceMob
- from aaf2.misc import VaryingValue
- could_import_aaf = True
- except ImportError:
- could_import_aaf = False
-
-
-@unittest.skipIf(
- not could_import_aaf,
- "AAF module not found. You might need to set OTIO_AAF_PYTHON_LIB"
-)
-class AAFReaderTests(unittest.TestCase):
-
- def test_aaf_read(self):
- aaf_path = SIMPLE_EXAMPLE_PATH
- timeline = otio.adapters.read_from_file(aaf_path)
- self.assertEqual(timeline.name, "OTIO TEST 1.Exported.01")
- fps = timeline.duration().rate
- self.assertEqual(fps, 24.0)
- self.assertEqual(
- timeline.duration(),
- otio.opentime.from_timecode("00:02:16:18", fps)
- )
-
- self.assertEqual(len(timeline.tracks), 3)
-
- self.assertEqual(len(timeline.video_tracks()), 1)
- video_track = timeline.video_tracks()[0]
- self.assertEqual(len(video_track), 5)
-
- self.assertEqual(len(timeline.audio_tracks()), 2)
-
- clips = video_track.find_clips()
-
- self.assertEqual(
- [
- "tech.fux (loop)-HD.mp4",
- "t-hawk (loop)-HD.mp4",
- "out-b (loop)-HD.mp4",
- "KOLL-HD.mp4",
- "brokchrd (loop)-HD.mp4"
- ],
- [clip.name for clip in clips]
- )
- self.maxDiff = None
- self.assertEqual(
- [clip.source_range for clip in clips],
- [
- otio.opentime.TimeRange(
- otio.opentime.from_timecode("01:00:00:00", fps),
- otio.opentime.from_timecode("00:00:30:00", fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_timecode("01:00:00:00", fps),
- otio.opentime.from_timecode("00:00:20:00", fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_timecode("01:00:00:00", fps),
- otio.opentime.from_timecode("00:00:30:02", fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_timecode("01:00:00:00", fps),
- otio.opentime.from_timecode("00:00:26:16", fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_timecode("01:00:00:00", fps),
- otio.opentime.from_timecode("00:00:30:00", fps)
- )
- ]
- )
-
- def test_aaf_global_start_time(self):
- timeline = otio.adapters.read_from_file(SIMPLE_EXAMPLE_PATH)
- self.assertEqual(
- otio.opentime.from_timecode("01:00:00:00", 24),
- timeline.global_start_time
- )
-
- def test_aaf_global_start_time_NTSC_DFTC(self):
- timeline = otio.adapters.read_from_file(FPS2997_DFTC_PATH)
- self.assertEqual(
- otio.opentime.from_timecode("05:00:00;00", rate=(30000.0 / 1001)),
- timeline.global_start_time
- )
-
- def test_aaf_read_trims(self):
- aaf_path = TRIMS_EXAMPLE_PATH
- timeline = otio.adapters.read_from_file(aaf_path)
- self.assertEqual(
- timeline.name,
- "OTIO TEST 1.Exported.01 - trims.Exported.02"
- )
- fps = timeline.duration().rate
- self.assertEqual(fps, 24.0)
-
- video_tracks = timeline.video_tracks()
- self.assertEqual(len(video_tracks), 1)
- video_track = video_tracks[0]
- self.assertEqual(len(video_track), 6)
-
- self.assertEqual(
- [type(item) for item in video_track],
- [
- otio.schema.Clip,
- otio.schema.Clip,
- otio.schema.Clip,
- otio.schema.Clip,
- otio.schema.Gap,
- otio.schema.Clip,
- ]
- )
-
- clips = video_track.find_clips()
-
- self.assertEqual(
- [item.name for item in video_track],
- [
- "tech.fux (loop)-HD.mp4",
- "t-hawk (loop)-HD.mp4",
- "out-b (loop)-HD.mp4",
- "KOLL-HD.mp4",
- "Filler", # Gap
- "brokchrd (loop)-HD.mp4"
- ]
- )
-
- self.maxDiff = None
- desired_ranges = [
- otio.opentime.TimeRange(
- otio.opentime.from_frames(86400, fps),
- otio.opentime.from_frames(720 - 0, fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_frames(86400 + 121, fps),
- otio.opentime.from_frames(480 - 121, fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_frames(86400 + 123, fps),
- otio.opentime.from_frames(523 - 123, fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_frames(0, fps),
- otio.opentime.from_frames(559 - 0, fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_frames(86400 + 69, fps),
- otio.opentime.from_frames(720 - 69, fps)
- )
- ]
- for clip, desired in zip(clips, desired_ranges):
- actual = clip.source_range
- self.assertEqual(
- actual,
- desired,
- "clip '{}' source_range should be {} not {}".format(
- clip.name,
- desired,
- actual
- )
- )
-
- desired_ranges = [
- otio.opentime.TimeRange(
- otio.opentime.from_timecode("00:00:00:00", fps),
- otio.opentime.from_timecode("00:00:30:00", fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_timecode("00:00:30:00", fps),
- otio.opentime.from_timecode("00:00:14:23", fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_timecode("00:00:44:23", fps),
- otio.opentime.from_timecode("00:00:16:16", fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_timecode("00:01:01:15", fps),
- otio.opentime.from_timecode("00:00:23:07", fps)
- ),
- otio.opentime.TimeRange( # Gap
- otio.opentime.from_timecode("00:01:24:22", fps),
- otio.opentime.from_timecode("00:00:04:12", fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_timecode("00:01:29:10", fps),
- otio.opentime.from_timecode("00:00:27:03", fps)
- )
- ]
- for item, desired in zip(video_track, desired_ranges):
- actual = item.trimmed_range_in_parent()
- self.assertEqual(
- actual,
- desired,
- "item '{}' trimmed_range_in_parent should be {} not {}".format(
- item.name,
- desired,
- actual
- )
- )
-
- self.assertEqual(
- timeline.duration(),
- otio.opentime.from_timecode("00:01:56:13", fps)
- )
-
- def test_aaf_read_transitions(self):
- aaf_path = TRANSITIONS_EXAMPLE_PATH
- timeline = otio.adapters.read_from_file(aaf_path)
- self.assertEqual(timeline.name, "OTIO TEST - transitions.Exported.01")
- fps = timeline.duration().rate
- self.assertEqual(fps, 24.0)
-
- video_tracks = timeline.video_tracks()
- self.assertEqual(len(video_tracks), 1)
- video_track = video_tracks[0]
- self.assertEqual(len(video_track), 12)
-
- clips = video_track.find_clips()
- self.assertEqual(len(clips), 4)
-
- self.assertEqual(
- [type(item) for item in video_track],
- [
- otio.schema.Gap,
- otio.schema.Transition,
- otio.schema.Clip,
- otio.schema.Transition,
- otio.schema.Clip,
- otio.schema.Transition,
- otio.schema.Gap,
- otio.schema.Transition,
- otio.schema.Clip,
- otio.schema.Clip,
- otio.schema.Transition,
- otio.schema.Gap,
- ]
- )
-
- self.assertEqual(
- [item.name for item in video_track],
- [
- "Filler",
- "Transition",
- "tech.fux (loop)-HD.mp4",
- "Transition",
- "t-hawk (loop)-HD.mp4",
- "Transition",
- "Filler",
- "Transition",
- "KOLL-HD.mp4",
- "brokchrd (loop)-HD.mp4",
- "Transition",
- "Filler"
- ]
- )
-
- self.maxDiff = None
- desired_ranges = [
- otio.opentime.TimeRange(
- otio.opentime.from_frames(86400 + 0, fps),
- otio.opentime.from_frames(117, fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_frames(86400 + 123, fps),
- otio.opentime.from_frames(200 - 123, fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_frames(55, fps),
- otio.opentime.from_frames(199 - 55, fps)
- ),
- otio.opentime.TimeRange(
- otio.opentime.from_frames(86400 + 0, fps),
- otio.opentime.from_frames(130, fps)
- )
- ]
- for clip, desired in zip(clips, desired_ranges):
- actual = clip.source_range
- self.assertEqual(
- actual,
- desired,
- "clip '{}' source_range should be {} not {}".format(
- clip.name,
- desired,
- actual
- )
- )
-
- desired_ranges = [
- otio.opentime.TimeRange( # Gap
- otio.opentime.from_timecode("00:00:00:00", fps),
- otio.opentime.from_timecode("00:00:00:00", fps)
- ),
- otio.opentime.TimeRange( # Transition
- otio.opentime.from_timecode("00:00:00:00", fps),
- otio.opentime.from_timecode("00:00:00:12", fps)
- ),
- otio.opentime.TimeRange( # tech.fux
- otio.opentime.from_timecode("00:00:00:00", fps),
- otio.opentime.from_timecode("00:00:04:21", fps)
- ),
- otio.opentime.TimeRange( # Transition
- otio.opentime.from_timecode("00:00:02:21", fps),
- otio.opentime.from_timecode("00:00:02:00", fps)
- ),
- otio.opentime.TimeRange( # t-hawk
- otio.opentime.from_timecode("00:00:04:21", fps),
- otio.opentime.from_timecode("00:00:03:05", fps)
- ),
- otio.opentime.TimeRange( # Transition
- otio.opentime.from_timecode("00:00:07:14", fps),
- otio.opentime.from_timecode("00:00:01:00", fps)
- ),
- otio.opentime.TimeRange( # Gap
- otio.opentime.from_timecode("00:00:08:02", fps),
- otio.opentime.from_timecode("00:00:02:05", fps)
- ),
- otio.opentime.TimeRange( # Transition
- otio.opentime.from_timecode("00:00:09:07", fps),
- otio.opentime.from_timecode("00:00:02:00", fps)
- ),
- otio.opentime.TimeRange( # KOLL-HD
- otio.opentime.from_timecode("00:00:10:07", fps),
- otio.opentime.from_timecode("00:00:06:00", fps)
- ),
- otio.opentime.TimeRange( # brokchrd
- otio.opentime.from_timecode("00:00:16:07", fps),
- otio.opentime.from_timecode("00:00:05:10", fps)
- ),
- otio.opentime.TimeRange( # Transition
- otio.opentime.from_timecode("00:00:19:17", fps),
- otio.opentime.from_timecode("00:00:02:00", fps)
- ),
- otio.opentime.TimeRange( # Gap
- otio.opentime.from_timecode("00:00:21:17", fps),
- otio.opentime.from_timecode("00:00:00:00", fps)
- )
- ]
- for item, desired in zip(video_track, desired_ranges):
- actual = item.trimmed_range_in_parent()
- self.assertEqual(
- desired,
- actual,
- "item '{}' trimmed_range_in_parent should be {} not {}".format(
-                    item.name,
- desired,
- actual
- )
- )
-
- self.assertEqual(
- timeline.duration(),
- otio.opentime.from_timecode("00:00:21:17", fps)
- )
-
- def test_timecode(self):
- aaf_path = TIMCODE_EXAMPLE_PATH
- timeline = otio.adapters.read_from_file(aaf_path)
- self.assertNotEqual(
- timeline.tracks[0][0].source_range.start_time,
- timeline.tracks[0][1].source_range.start_time
- )
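-        # 86424 frames = 86400 (01:00:00:00 at 24 fps) + 24 frames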
- self.assertEqual(
- timeline.tracks[0][1].source_range.start_time,
- otio.opentime.RationalTime(86424, 24),
- )
-
- def test_aaf_user_comments(self):
- aaf_path = TRIMS_EXAMPLE_PATH
- timeline = otio.adapters.read_from_file(aaf_path)
- self.assertIsNotNone(timeline)
- self.assertEqual(type(timeline), otio.schema.Timeline)
- self.assertIsNotNone(timeline.metadata.get("AAF"))
- correctWords = [
- "test1",
- "testing 1 2 3",
- "Eyjafjallaj\xf6kull",
- "'s' \"d\" `b`",
- None, # Gap
- None
- ]
- for clip, correctWord in zip(timeline.tracks[0], correctWords):
- if isinstance(clip, otio.schema.Gap):
- continue
- AAFmetadata = clip.media_reference.metadata.get("AAF")
- self.assertIsNotNone(AAFmetadata)
- self.assertIsNotNone(AAFmetadata.get("UserComments"))
- self.assertEqual(
- AAFmetadata.get("UserComments").get("CustomTest"),
- correctWord
- )
-
- def test_aaf_flatten_tracks(self):
- multitrack_timeline = otio.adapters.read_from_file(
- MULTITRACK_EXAMPLE_PATH, attach_markers=False
- )
- preflattened_timeline = otio.adapters.read_from_file(
- PREFLATTENED_EXAMPLE_PATH, attach_markers=False
- )
-
- # first make sure we got the structure we expected
- self.assertEqual(3, len(preflattened_timeline.tracks))
- self.assertEqual(1, len(preflattened_timeline.video_tracks()))
- self.assertEqual(2, len(preflattened_timeline.audio_tracks()))
-
- self.assertEqual(3, len(multitrack_timeline.video_tracks()))
- self.assertEqual(2, len(multitrack_timeline.audio_tracks()))
- self.assertEqual(8, len(multitrack_timeline.tracks))
-
- preflattened = preflattened_timeline.video_tracks()[0]
- self.assertEqual(7, len(preflattened))
- flattened = otio.algorithms.flatten_stack(
- multitrack_timeline.video_tracks()
- )
- self.assertEqual(7, len(flattened))
-
-        # Let's remove some AAF metadata that will always be different
-        # so we can compare everything else.
- for t in (preflattened, flattened):
-
- t.name = ""
- t.metadata.pop("AAF", None)
-
- for c in t.find_children():
- if hasattr(c, "media_reference") and c.media_reference:
- mr = c.media_reference
- mr.metadata.get("AAF", {}).pop('LastModified', None)
- meta = c.metadata.get("AAF", {})
- meta.pop('ComponentAttributeList', None)
- meta.pop('DataDefinition', None)
- meta.pop('Length', None)
- meta.pop('StartTime', None)
-
- # We don't care about Gap start times, only their duration matters
- for g in t.find_children(descended_from_type=otio.schema.Gap):
- dur = g.source_range.duration
- rate = g.source_range.start_time.rate
- g.source_range = otio.opentime.TimeRange(
- otio.opentime.RationalTime(0, rate),
- dur
- )
-
- self.maxDiff = None
- self.assertMultiLineEqual(
- otio.adapters.write_to_string(preflattened, "otio_json"),
- otio.adapters.write_to_string(flattened, "otio_json")
- )
-
- def test_aaf_nesting(self):
- timeline = otio.adapters.read_from_file(NESTING_EXAMPLE_PATH)
- self.assertEqual(1, len(timeline.tracks))
- track = timeline.tracks[0]
- self.assertEqual(3, len(track))
-
- clipA, nested, clipB = track
- self.assertEqual(otio.schema.Clip, type(clipA))
- self.assertEqual(otio.schema.Track, type(nested))
- self.assertEqual(otio.schema.Clip, type(clipB))
-
- self.assertEqual(2, len(nested))
- nestedClipA, nestedClipB = nested
- self.assertEqual(otio.schema.Clip, type(nestedClipA))
- self.assertEqual(otio.schema.Clip, type(nestedClipB))
-
- self.assertEqual(
- otio.opentime.TimeRange(
- start_time=otio.opentime.RationalTime(24, 24),
- duration=otio.opentime.RationalTime(16, 24)
- ),
- clipA.trimmed_range()
- )
- self.assertEqual(
- otio.opentime.TimeRange(
- start_time=otio.opentime.RationalTime(86400 + 32, 24),
- duration=otio.opentime.RationalTime(16, 24)
- ),
- clipB.trimmed_range()
- )
-
- self.assertEqual(
- otio.opentime.TimeRange(
- start_time=otio.opentime.RationalTime(40, 24),
- duration=otio.opentime.RationalTime(8, 24)
- ),
- nestedClipA.trimmed_range()
- )
- self.assertEqual(
- otio.opentime.TimeRange(
- start_time=otio.opentime.RationalTime(86400 + 24, 24),
- duration=otio.opentime.RationalTime(8, 24)
- ),
- nestedClipB.trimmed_range()
- )
-
- # TODO: This belongs in the algorithms tests, not the AAF tests.
- def SKIP_test_nesting_flatten(self):
- nested_timeline = otio.adapters.read_from_file(
- NESTING_EXAMPLE_PATH
- )
- preflattened_timeline = otio.adapters.read_from_file(
- NESTING_PREFLATTENED_EXAMPLE_PATH
- )
- flattened_track = otio.algorithms.flatten_stack(nested_timeline.tracks)
- self.assertEqual(
- preflattened_timeline.tracks[0],
- flattened_track
- )
-
- def test_read_linear_speed_effects(self):
- timeline = otio.adapters.read_from_file(
- LINEAR_SPEED_EFFECTS_EXAMPLE_PATH
- )
- self.assertEqual(1, len(timeline.tracks))
- track = timeline.tracks[0]
- self.assertEqual(20, len(track))
-
- clip = track[0]
- self.assertEqual(0, len(clip.effects))
-
- for clip in track[1:]:
- self.assertIsInstance(clip, otio.schema.Clip)
- self.assertEqual(1, len(clip.effects))
- effect = clip.effects[0]
- self.assertEqual(otio.schema.LinearTimeWarp, type(effect))
-
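-        # For each ratio label "m/n" in the comments below, the expected
-        # time_scalar is n/m, listed here as a percentage (e.g. 3/2 -> 66.67).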
- expected = [
- 50.00, # 2/1
- 33.33, # 3/1
- 25.00, # 4/1
- 200.00, # 1/2
- 100.00, # 2/2
- 66.67, # 3/2
- 50.00, # 4/2
- 300.00, # 1/3
- 150.00, # 2/3
- 100.00, # 3/3
- 75.00, # 4/3
- 400.00, # 1/4
- 200.00, # 2/4
- 133.33, # 3/4
- 100.00, # 4/4
- 500.00, # 1/5
- 250.00, # 2/5
- 166.67, # 3/5
- 125.00 # 4/5
- ]
- actual = [
- round(clip.effects[0].time_scalar * 100.0, 2) for clip in track[1:]
- ]
- self.assertEqual(expected, actual)
-
- def test_read_misc_speed_effects(self):
- timeline = otio.adapters.read_from_file(
- MISC_SPEED_EFFECTS_EXAMPLE_PATH
- )
- self.assertEqual(1, len(timeline.tracks))
- track = timeline.tracks[0]
- self.assertEqual(10, len(track))
-
- clip = track[0]
- self.assertEqual(0, len(clip.effects))
- self.assertEqual(8, clip.duration().value)
-
- clip = track[1]
- self.assertEqual(1, len(clip.effects))
- effect = clip.effects[0]
- self.assertEqual(otio.schema.FreezeFrame, type(effect))
- self.assertEqual(0, effect.time_scalar)
- self.assertEqual(8, clip.duration().value)
-
- clip = track[2]
- self.assertEqual(1, len(clip.effects))
- effect = clip.effects[0]
- self.assertEqual(otio.schema.LinearTimeWarp, type(effect))
- self.assertEqual(2.0, effect.time_scalar)
- self.assertEqual(8, clip.duration().value)
-
- clip = track[3]
- self.assertEqual(1, len(clip.effects))
- effect = clip.effects[0]
- self.assertEqual(otio.schema.LinearTimeWarp, type(effect))
- self.assertEqual(0.5, effect.time_scalar)
- self.assertEqual(8, clip.duration().value)
-
- clip = track[4]
- self.assertEqual(1, len(clip.effects))
- effect = clip.effects[0]
- self.assertEqual(otio.schema.LinearTimeWarp, type(effect))
- self.assertEqual(3.0, effect.time_scalar)
- self.assertEqual(8, clip.duration().value)
-
- clip = track[5]
- self.assertEqual(1, len(clip.effects))
- effect = clip.effects[0]
- self.assertEqual(otio.schema.LinearTimeWarp, type(effect))
- self.assertEqual(0.3750, effect.time_scalar)
- self.assertEqual(8, clip.duration().value)
-
- clip = track[6]
- self.assertEqual(1, len(clip.effects))
- effect = clip.effects[0]
- self.assertEqual(otio.schema.LinearTimeWarp, type(effect))
- self.assertEqual(14.3750, effect.time_scalar)
- self.assertEqual(8, clip.duration().value)
-
- clip = track[7]
- self.assertEqual(1, len(clip.effects))
- effect = clip.effects[0]
- self.assertEqual(otio.schema.LinearTimeWarp, type(effect))
- self.assertEqual(0.3750, effect.time_scalar)
- self.assertEqual(8, clip.duration().value)
-
- clip = track[8]
- self.assertEqual(1, len(clip.effects))
- effect = clip.effects[0]
- self.assertEqual(otio.schema.LinearTimeWarp, type(effect))
- self.assertEqual(-1.0, effect.time_scalar)
- self.assertEqual(8, clip.duration().value)
-
- clip = track[9]
- self.assertEqual(1, len(clip.effects))
- effect = clip.effects[0]
- self.assertTrue(isinstance(effect, otio.schema.TimeEffect))
- self.assertEqual(16, clip.duration().value)
- # TODO: We don't yet support non-linear time warps, but when we
- # do then this effect is a "Speed Bump" from 166% to 44% to 166%
-
- def test_muted_clip(self):
- timeline = otio.adapters.read_from_file(MUTED_CLIP_PATH)
- self.assertIsInstance(timeline, otio.schema.Timeline)
- self.assertEqual(len(timeline.tracks), 1)
- track = timeline.tracks[0]
- self.assertEqual(len(track), 1)
- clip = track[0]
- self.assertIsInstance(clip, otio.schema.Clip)
- self.assertEqual(clip.name, 'Frame Debugger 0h.mov')
- self.assertEqual(clip.enabled, False)
-
- def test_essence_group(self):
- timeline = otio.adapters.read_from_file(ESSENCE_GROUP_PATH)
-
- self.assertIsNotNone(timeline)
- self.assertEqual(
- otio.opentime.RationalTime(12, 24),
- timeline.duration()
- )
-
- def test_30fps(self):
- tl = otio.adapters.read_from_file(FPS30_CLIP_PATH)
- self.assertEqual(tl.duration().rate, 30)
-
- def test_2997fps(self):
- tl = otio.adapters.read_from_file(FPS2997_CLIP_PATH)
- self.assertEqual(tl.duration().rate, 30000 / 1001.0)
-
- def test_utf8_names(self):
- timeline = otio.adapters.read_from_file(UTF8_CLIP_PATH)
- self.assertEqual(
- ("Sequence_ABCXYZñç꜕∑´®†¥¨ˆøπ“‘åß∂ƒ©˙∆˚¬…æΩ≈ç√∫˜µ≤≥÷.Exported.01"),
- timeline.name
- )
- video_track = timeline.video_tracks()[0]
- first_clip = video_track[0]
- self.assertEqual(
- first_clip.name,
- ("Clip_ABCXYZñç꜕∑´®†¥¨ˆøπ“‘åß∂ƒ©˙∆˚¬…æΩ≈ç√∫˜µ≤≥÷")
- )
- self.assertEqual(
- (
- first_clip.media_reference.metadata["AAF"]["UserComments"]["Comments"]
- ).encode('utf-8'),
- ("Comments_ABCXYZñç꜕∑´®†¥¨ˆøπ“‘åß∂ƒ©˙∆˚¬…æΩ≈ç√∫˜µ≤≥÷").encode()
- )
-
- def test_multiple_top_level_mobs(self):
- result = otio.adapters.read_from_file(MULTIPLE_TOP_LEVEL_MOBS_CLIP_PATH)
- self.assertIsInstance(result, otio.schema.SerializableCollection)
- self.assertEqual(2, len(result))
-
- def test_external_reference_from_unc_path(self):
- timeline = otio.adapters.read_from_file(SIMPLE_EXAMPLE_PATH)
- video_track = timeline.video_tracks()[0]
- first_clip = video_track[0]
- self.assertIsInstance(first_clip.media_reference,
- otio.schema.ExternalReference)
-
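-        # The target_url should match the UNC path recorded in the mob's
-        # UserComments, with a file:// scheme prepended.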
- unc_path = first_clip.media_reference.metadata.get("AAF", {}) \
- .get("UserComments", {}) \
- .get("UNC Path")
- unc_path = "file://" + unc_path
- self.assertEqual(
- first_clip.media_reference.target_url,
- unc_path
- )
-
- def test_external_reference_paths(self):
- timeline = otio.adapters.read_from_file(COMPOSITE_PATH)
- video_target_urls = [
- [
- "file:////animation/root/work/editorial/jburnell/700/1.aaf",
- "file:////animation/root/work/editorial/jburnell/700/2.aaf",
- "file:////animation/root/work/editorial/jburnell/700/3.aaf"
- ],
- [
- "file:///C%3A/Avid%20MediaFiles/MXF/1/700.Exported.03_Vi48896FA0V.mxf"
- ]
- ]
- audio_target_urls = [
- [
- "file:///C%3A/OMFI%20MediaFiles/700.ExportA01.5D8A14612890A.aif"
- ]
- ]
-
- for track_index, video_track in enumerate(timeline.video_tracks()):
- for clip_index, clip in enumerate(video_track):
- self.assertIsInstance(clip.media_reference,
- otio.schema.ExternalReference)
- self.assertEqual(clip.media_reference.target_url,
- video_target_urls[track_index][clip_index])
-
- for track_index, audio_track in enumerate(timeline.audio_tracks()):
- for clip_index, clip in enumerate(audio_track):
- self.assertIsInstance(clip.media_reference,
- otio.schema.ExternalReference)
- self.assertEqual(clip.media_reference.target_url,
- audio_target_urls[track_index][clip_index])
-
- def test_aaf_subclip_metadata(self):
- """
-        For subclips, the AAF SourceClip can actually reference a
-        CompositionMob (instead of a MasterMob), in which case we need to
-        drill down into the CompositionMob to find the MasterMob with the
-        UserComments.
- """
-
- timeline = otio.adapters.read_from_file(SUBCLIP_PATH)
- audio_track = timeline.audio_tracks()[0]
- first_clip = audio_track[0]
-
- aaf_metadata = first_clip.media_reference.metadata.get("AAF")
-
- expected_md = {"Director": "director_name",
- "Line": "script_line",
- "Talent": "Speaker",
- "Logger": "logger",
- "Character": "character_name"}
-
- self._verify_user_comments(aaf_metadata, expected_md)
-
- def test_aaf_sourcemob_usage(self):
- """
-        Each clip stores its source mob usage AAF value as metadata in
-        `SourceMobUsage`. For sub-clips this value should be `Usage_SubClip`.
- """
- # `Usage_SubClip` value
- subclip_timeline = otio.adapters.read_from_file(SUBCLIP_PATH)
- subclip_usages = {"Subclip.BREATH": "Usage_SubClip"}
- for clip in subclip_timeline.find_clips():
- self.assertEqual(
- clip.metadata.get("AAF", {}).get("SourceMobUsage"),
- subclip_usages[clip.name]
- )
-
- # no usage value
- simple_timeline = otio.adapters.read_from_file(SIMPLE_EXAMPLE_PATH)
- simple_usages = {
- "KOLL-HD.mp4": "",
- "brokchrd (loop)-HD.mp4": "",
- "out-b (loop)-HD.mp4": "",
- "t-hawk (loop)-HD.mp4": "",
- "tech.fux (loop)-HD.mp4": ""
- }
- for clip in simple_timeline.find_clips():
- self.assertEqual(
- clip.metadata.get("AAF", {}).get("SourceMobUsage", ""),
- simple_usages[clip.name]
- )
-
- def test_aaf_composition_metadata(self):
- """
-        For standard clips the AAF SourceClip can actually reference a
-        CompositionMob (instead of a MasterMob), and the CompositionMob holds
-        the UserComments instead of the MasterMob.
-        My guess is that the CompositionMob is used to share the same metadata
-        between different SourceClips.
- """
-
- timeline = otio.adapters.read_from_file(COMPOSITION_METADATA_PATH)
-
- audio_track = timeline.audio_tracks()[0]
- first_clip = audio_track[0]
-
- aaf_metadata = first_clip.media_reference.metadata.get("AAF")
-
- expected_md = {"Director": "director",
- "Line": "scriptline",
- "Talent": "talent",
- "Logger": "",
- "Character": "character"}
-
- self._verify_user_comments(aaf_metadata, expected_md)
-
- def test_aaf_composition_metadata_mastermob(self):
- """
-        For standard clips the AAF SourceClip can actually reference a
-        CompositionMob (instead of a MasterMob) where both the CompositionMob
-        AND the MasterMob hold UserComments.
-        In this case the MasterMob has the valid UserComments
-        (determined empirically).
- """
-
- timeline = otio.adapters.read_from_file(
- COMPOSITION_METADATA_MASTERMOB_METADATA_PATH)
-
- audio_track = timeline.audio_tracks()[0]
- first_clip = audio_track[0]
-
- aaf_metadata = first_clip.metadata.get("AAF")
-
- expected_md = {"Director": "director",
- "Line": "scriptline",
- "Talent": "talent",
- "Logger": "logger",
- "Character": "character"}
-
- self._verify_user_comments(aaf_metadata, expected_md)
-
- def test_aaf_multiple_timecode_objects(self):
- """
- Make sure we can read SourceClips with multiple timecode objects of the
- same start value and length.
- """
-
- timeline = otio.adapters.read_from_file(
- MULTIPLE_TIMECODE_OBJECTS_PATH)
-
- self.assertIsNotNone(timeline)
-
- video_track = timeline.video_tracks()[0]
- only_clip = video_track[0]
-
- available_range = only_clip.media_reference.available_range
-
- self.assertEqual(available_range.start_time.value, 86501.0)
- self.assertEqual(available_range.duration.value, 1981.0)
-
- def test_aaf_transcribe_log(self):
- """Excercise an aaf-adapter read with transcribe_logging enabled."""
-
- # capture output of debugging statements
- old_stdout = sys.stdout
- old_stderr = sys.stderr
-
- sys.stdout = io.StringIO()
- sys.stderr = io.StringIO()
- otio.adapters.read_from_file(SUBCLIP_PATH, transcribe_log=True)
- result_stdout = sys.stdout.getvalue()
- result_stderr = sys.stderr.getvalue()
-
- sys.stdout = old_stdout
- sys.stderr = old_stderr
-
-        # normalize Python 2 and 3 bytes/str representations
- result_stdout = result_stdout.replace("b'", "").replace("'", "")
-
- self.assertEqual(result_stdout, TRANSCRIPTION_RESULT)
- self.assertEqual(result_stderr, '')
-
- def test_aaf_marker_over_transition(self):
- """
-        Make sure we can transcribe this composition with markers over a
-        transition.
- """
-
- timeline = None
-
- try:
- timeline = otio.adapters.read_from_file(
- MARKER_OVER_TRANSITION_PATH
- )
-
- except Exception as e:
- print('[ERROR] Transcribing test sample data `{}` caused an exception: {}'.format( # noqa
- os.path.basename(MARKER_OVER_TRANSITION_PATH),
- e)
- )
-
- self.assertIsNotNone(timeline)
-
- def test_aaf_marker_over_audio_file(self):
- """
-        Make sure we can transcribe markers over an audio AAF file.
- """
-
- timeline = None
-
- try:
- timeline = otio.adapters.read_from_file(
- MARKER_OVER_AUDIO_PATH
- )
-
- except Exception as e:
- print('[ERROR] Transcribing test sample data `{}` caused an exception: {}'.format( # noqa
- os.path.basename(MARKER_OVER_AUDIO_PATH),
- e)
- )
-
- self.assertIsNotNone(timeline)
-
- # Verify markers
- # We expect 1 track with 3 markers on it from the test data.
-        self.assertEqual(1, len(timeline.tracks))
-
- track = timeline.tracks[0]
- self.assertEqual(3, len(track.markers))
-
- fps = 24.0
- expected_markers = [
- {
- 'color': 'RED',
- 'label': 'm1',
- 'start_time': otio.opentime.from_frames(50.0, fps)
- },
- {
- 'color': 'GREEN',
- 'label': 'm2',
- 'start_time': otio.opentime.from_frames(103.0, fps)
- },
- {
- 'color': 'BLUE',
- 'label': 'm3',
- 'start_time': otio.opentime.from_frames(166.0, fps)
- }
- ]
-
- for index, marker in enumerate(track.markers):
- expected_marker = expected_markers[index]
-
- color = marker.color
- label = marker.metadata.get('AAF', {}).get('CommentMarkerUSer')
- start_time = marker.marked_range.start_time
-
- self.assertEqual(color, expected_marker.get('color'))
- self.assertEqual(label, expected_marker.get('label'))
- self.assertEqual(start_time, expected_marker.get('start_time'))
-
- def _verify_user_comments(self, aaf_metadata, expected_md):
-
-        self.assertIsNotNone(aaf_metadata)
-        self.assertIn("UserComments", aaf_metadata)
-
- user_comments = aaf_metadata['UserComments']
-
- user_comment_keys = user_comments.keys()
- for k, v in expected_md.items():
-            self.assertIn(k, user_comment_keys)
- self.assertEqual(user_comments[k], v)
-
- def test_attach_markers(self):
- """Check if markers are correctly translated and attached to the right items.
- """
- timeline = otio.adapters.read_from_file(MULTIPLE_MARKERS_PATH,
- attach_markers=True)
-
- expected_markers = {
- (1, 'Filler'): [('PUBLISH', 0.0, 1.0, 24.0, 'RED')],
- (1, 'zts02_1010'): [
- ('GREEN: V1: zts02_1010: f1104: seq.f1104',
- 1103.0, 1.0, 24.0, 'GREEN')
- ],
- (2, 'ScopeReference'): [
- ('FX', 0.0, 1.0, 24.0, 'YELLOW'),
- ('BLUE: V2 (no FX): zts02_1020: f1134: seq.f1327',
- 518.0, 1.0, 24.0, 'BLUE')
- ],
- (3, 'ScopeReference'): [
- ('INSERT', 0.0, 1.0, 24.0, 'CYAN'),
- ('CYAN: V3: zts02_1030: f1212: seq.f1665',
- 856.0,
- 1.0,
- 24.0,
- 'CYAN')
- ],
- (4, 'Drop_24.mov'): [
- ('MAGENTA: V4: zts02_1040: f1001: seq.f1666',
- 86400.0, 1.0, 24.0, 'MAGENTA')
- ],
- (5, 'ScopeReference'): [
- ('RED: V5: zts02_1050: f1061: seq.f1885',
- 884.0, 1.0, 24.0, 'RED')
- ]
- }
-
- all_markers = {}
- for i, track in enumerate(
- timeline.find_children(descended_from_type=otio.schema.Track)
- ):
- for item in track.find_children():
- markers = [
- (
- m.name,
- m.marked_range.start_time.value,
- m.marked_range.duration.value,
- m.marked_range.start_time.rate,
- m.color
- ) for m in item.markers
- ]
- if markers:
- all_markers[(i, item.name)] = markers
- self.assertEqual(all_markers, expected_markers)
-
- def test_keyframed_properties(self):
- def get_expected_dict(timeline):
- expected = []
- for clip in timeline.find_children(descended_from_type=otio.schema.Clip):
- for effect in clip.effects:
- props = {}
- parameters = effect.metadata.get("AAF", {}).get("Parameters", {})
- for paramName, paramValue in parameters.items():
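-                    # Non-dict parameter values raise TypeError on the
-                    # membership tests below; treat those as not keyframed
-                    # and not baked.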
- try:
- is_animated = "_aaf_keyframed_property" in paramValue
- except (TypeError, KeyError):
- is_animated = False
- try:
- baked_count = len(paramValue["keyframe_baked_values"])
- except (TypeError, KeyError):
- baked_count = None
- props[paramName] = {"keyframed": is_animated,
- "baked_sample_count": baked_count}
- expected.append(props)
- return expected
-
- tl_unbaked = otio.adapters.read_from_file(KEYFRAMED_PROPERTIES_PATH,
- bake_keyframed_properties=False)
-
- tl_baked = otio.adapters.read_from_file(KEYFRAMED_PROPERTIES_PATH,
- bake_keyframed_properties=True)
-
- expected_unbaked = [
- {
- "AFX_FIXED_ASPECT_U": {"baked_sample_count": None, "keyframed": False},
- "AvidEffectID": {"baked_sample_count": None, "keyframed": False},
- "AvidParameterByteOrder": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_BORDER_ENABLED_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_DEFOCUS_MODE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_FG_KEY_HIGH_SAT_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_MT_WARP_FOREGROUND_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_SCALE_ENABLED_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_SCALE_X_U": {"baked_sample_count": None, "keyframed": True},
- "DVE_SCALE_Y_U": {"baked_sample_count": None, "keyframed": True},
- "DVE_TRACKING_POS_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_AMPLT_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_CURVE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_FREQ_U": {"baked_sample_count": None, "keyframed": False},
- },
- {
- "AFX_FIXED_ASPECT_U": {"baked_sample_count": None, "keyframed": False},
- "AvidEffectID": {"baked_sample_count": None, "keyframed": False},
- "AvidParameterByteOrder": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_BORDER_ENABLED_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_DEFOCUS_MODE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_FG_KEY_HIGH_SAT_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_MT_WARP_FOREGROUND_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_ROT_ENABLED_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_ROT_X_U": {"baked_sample_count": None, "keyframed": True},
- "DVE_ROT_Y_U": {"baked_sample_count": None, "keyframed": True},
- "DVE_ROT_Z_U": {"baked_sample_count": None, "keyframed": True},
- "DVE_TRACKING_POS_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_AMPLT_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_CURVE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_FREQ_U": {"baked_sample_count": None, "keyframed": False},
- "Vergence": {"baked_sample_count": None, "keyframed": True},
- },
- {
- "AFX_FIXED_ASPECT_U": {"baked_sample_count": None, "keyframed": False},
- "AvidEffectID": {"baked_sample_count": None, "keyframed": False},
- "AvidParameterByteOrder": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_BORDER_ENABLED_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_DEFOCUS_MODE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_FG_KEY_HIGH_SAT_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_MT_WARP_FOREGROUND_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_POS_ENABLED_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_POS_X_U": {"baked_sample_count": None, "keyframed": True},
- "DVE_POS_Y_U": {"baked_sample_count": None, "keyframed": True},
- "DVE_POS_Z_U": {"baked_sample_count": None, "keyframed": True},
- "DVE_TRACKING_POS_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_AMPLT_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_CURVE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_FREQ_U": {"baked_sample_count": None, "keyframed": False},
- "Vergence": {"baked_sample_count": None, "keyframed": True},
- },
- {
- "AvidMotionInputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionOutputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionPulldown": {"baked_sample_count": None, "keyframed": False},
- "AvidPhase": {"baked_sample_count": None, "keyframed": False},
- "PARAM_SPEED_MAP_U": {"baked_sample_count": None, "keyframed": True},
- "PARAM_SPEED_OFFSET_MAP_U": {"baked_sample_count": None,
- "keyframed": True},
- "SpeedRatio": {"baked_sample_count": None, "keyframed": False},
- },
- {
- "AvidMotionInputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionOutputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionPulldown": {"baked_sample_count": None, "keyframed": False},
- "AvidPhase": {"baked_sample_count": None, "keyframed": False},
- "PARAM_SPEED_MAP_U": {"baked_sample_count": None, "keyframed": True},
- "PARAM_SPEED_OFFSET_MAP_U": {"baked_sample_count": None,
- "keyframed": True},
- "SpeedRatio": {"baked_sample_count": None, "keyframed": False},
- },
- {
- "AvidMotionInputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionOutputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionPulldown": {"baked_sample_count": None, "keyframed": False},
- "AvidPhase": {"baked_sample_count": None, "keyframed": False},
- "PARAM_SPEED_MAP_U": {"baked_sample_count": None, "keyframed": True},
- "PARAM_SPEED_OFFSET_MAP_U": {"baked_sample_count": None,
- "keyframed": True},
- "SpeedRatio": {"baked_sample_count": None, "keyframed": False},
- },
- {
- "AvidMotionInputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionOutputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionPulldown": {"baked_sample_count": None, "keyframed": False},
- "AvidPhase": {"baked_sample_count": None, "keyframed": False},
- "PARAM_SPEED_MAP_U": {"baked_sample_count": None, "keyframed": True},
- "PARAM_SPEED_OFFSET_MAP_U": {"baked_sample_count": None,
- "keyframed": True},
- "SpeedRatio": {"baked_sample_count": None, "keyframed": False},
- },
- {
- "AvidMotionInputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionOutputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionPulldown": {"baked_sample_count": None, "keyframed": False},
- "AvidPhase": {"baked_sample_count": None, "keyframed": False},
- "PARAM_SPEED_MAP_U": {"baked_sample_count": None, "keyframed": True},
- "PARAM_SPEED_OFFSET_MAP_U": {"baked_sample_count": None,
- "keyframed": True},
- "SpeedRatio": {"baked_sample_count": None, "keyframed": False},
- },
- {
- "AFX_FIXED_ASPECT_U": {"baked_sample_count": None, "keyframed": False},
- "AvidEffectID": {"baked_sample_count": None, "keyframed": False},
- "AvidParameterByteOrder": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_BORDER_ENABLED_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_DEFOCUS_MODE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_FG_KEY_HIGH_SAT_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_MT_WARP_FOREGROUND_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_PRSP_ENABLED_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_PRSP_X_U": {"baked_sample_count": None, "keyframed": True},
- "DVE_PRSP_Y_U": {"baked_sample_count": None, "keyframed": True},
- "DVE_PRSP_Z_U": {"baked_sample_count": None, "keyframed": True},
- "DVE_TRACKING_POS_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_AMPLT_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_CURVE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_FREQ_U": {"baked_sample_count": None, "keyframed": False},
- "Vergence": {"baked_sample_count": None, "keyframed": True},
- },
- ]
-
- expected_baked = [
- {
- "AFX_FIXED_ASPECT_U": {"baked_sample_count": None, "keyframed": False},
- "AvidEffectID": {"baked_sample_count": None, "keyframed": False},
- "AvidParameterByteOrder": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_BORDER_ENABLED_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_DEFOCUS_MODE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_FG_KEY_HIGH_SAT_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_MT_WARP_FOREGROUND_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_SCALE_ENABLED_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_SCALE_X_U": {"baked_sample_count": 212, "keyframed": True},
- "DVE_SCALE_Y_U": {"baked_sample_count": 212, "keyframed": True},
- "DVE_TRACKING_POS_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_AMPLT_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_CURVE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_FREQ_U": {"baked_sample_count": None, "keyframed": False},
- },
- {
- "AFX_FIXED_ASPECT_U": {"baked_sample_count": None, "keyframed": False},
- "AvidEffectID": {"baked_sample_count": None, "keyframed": False},
- "AvidParameterByteOrder": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_BORDER_ENABLED_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_DEFOCUS_MODE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_FG_KEY_HIGH_SAT_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_MT_WARP_FOREGROUND_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_ROT_ENABLED_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_ROT_X_U": {"baked_sample_count": 159, "keyframed": True},
- "DVE_ROT_Y_U": {"baked_sample_count": 159, "keyframed": True},
- "DVE_ROT_Z_U": {"baked_sample_count": 159, "keyframed": True},
- "DVE_TRACKING_POS_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_AMPLT_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_CURVE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_FREQ_U": {"baked_sample_count": None, "keyframed": False},
- "Vergence": {"baked_sample_count": 159, "keyframed": True},
- },
- {
- "AFX_FIXED_ASPECT_U": {"baked_sample_count": None, "keyframed": False},
- "AvidEffectID": {"baked_sample_count": None, "keyframed": False},
- "AvidParameterByteOrder": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_BORDER_ENABLED_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_DEFOCUS_MODE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_FG_KEY_HIGH_SAT_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_MT_WARP_FOREGROUND_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_POS_ENABLED_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_POS_X_U": {"baked_sample_count": 116, "keyframed": True},
- "DVE_POS_Y_U": {"baked_sample_count": 116, "keyframed": True},
- "DVE_POS_Z_U": {"baked_sample_count": 116, "keyframed": True},
- "DVE_TRACKING_POS_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_AMPLT_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_CURVE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_FREQ_U": {"baked_sample_count": None, "keyframed": False},
- "Vergence": {"baked_sample_count": 116, "keyframed": True},
- },
- {
- "AvidMotionInputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionOutputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionPulldown": {"baked_sample_count": None, "keyframed": False},
- "AvidPhase": {"baked_sample_count": None, "keyframed": False},
- "PARAM_SPEED_MAP_U": {"baked_sample_count": 276, "keyframed": True},
- "PARAM_SPEED_OFFSET_MAP_U": {"baked_sample_count": 276,
- "keyframed": True},
- "SpeedRatio": {"baked_sample_count": None, "keyframed": False},
- },
- {
- "AvidMotionInputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionOutputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionPulldown": {"baked_sample_count": None, "keyframed": False},
- "AvidPhase": {"baked_sample_count": None, "keyframed": False},
- "PARAM_SPEED_MAP_U": {"baked_sample_count": 182, "keyframed": True},
- "PARAM_SPEED_OFFSET_MAP_U": {"baked_sample_count": 182,
- "keyframed": True},
- "SpeedRatio": {"baked_sample_count": None, "keyframed": False},
- },
- {
- "AvidMotionInputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionOutputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionPulldown": {"baked_sample_count": None, "keyframed": False},
- "AvidPhase": {"baked_sample_count": None, "keyframed": False},
- "PARAM_SPEED_MAP_U": {"baked_sample_count": 219, "keyframed": True},
- "PARAM_SPEED_OFFSET_MAP_U": {"baked_sample_count": 219,
- "keyframed": True},
- "SpeedRatio": {"baked_sample_count": None, "keyframed": False},
- },
- {
- "AvidMotionInputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionOutputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionPulldown": {"baked_sample_count": None, "keyframed": False},
- "AvidPhase": {"baked_sample_count": None, "keyframed": False},
- "PARAM_SPEED_MAP_U": {"baked_sample_count": 193, "keyframed": True},
- "PARAM_SPEED_OFFSET_MAP_U": {"baked_sample_count": 193,
- "keyframed": True},
- "SpeedRatio": {"baked_sample_count": None, "keyframed": False},
- },
- {
- "AvidMotionInputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionOutputFormat": {"baked_sample_count": None,
- "keyframed": False},
- "AvidMotionPulldown": {"baked_sample_count": None, "keyframed": False},
- "AvidPhase": {"baked_sample_count": None, "keyframed": False},
- "PARAM_SPEED_MAP_U": {"baked_sample_count": 241, "keyframed": True},
- "PARAM_SPEED_OFFSET_MAP_U": {"baked_sample_count": 241,
- "keyframed": True},
- "SpeedRatio": {"baked_sample_count": None, "keyframed": False},
- },
- {
- "AFX_FIXED_ASPECT_U": {"baked_sample_count": None, "keyframed": False},
- "AvidEffectID": {"baked_sample_count": None, "keyframed": False},
- "AvidParameterByteOrder": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_BORDER_ENABLED_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_DEFOCUS_MODE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_FG_KEY_HIGH_SAT_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_MT_WARP_FOREGROUND_U": {"baked_sample_count": None,
- "keyframed": False},
- "DVE_PRSP_ENABLED_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_PRSP_X_U": {"baked_sample_count": 241, "keyframed": True},
- "DVE_PRSP_Y_U": {"baked_sample_count": 241, "keyframed": True},
- "DVE_PRSP_Z_U": {"baked_sample_count": 241, "keyframed": True},
- "DVE_TRACKING_POS_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_AMPLT_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_CURVE_U": {"baked_sample_count": None, "keyframed": False},
- "DVE_WARP_FREQ_U": {"baked_sample_count": None, "keyframed": False},
- "Vergence": {"baked_sample_count": 241, "keyframed": True},
- },
- ]
-
- self.assertEqual(get_expected_dict(tl_unbaked), expected_unbaked)
- self.assertEqual(get_expected_dict(tl_baked), expected_baked)
-
-
-class AAFWriterTests(unittest.TestCase):
- def test_aaf_writer_gaps(self):
- otio_timeline = otio.adapters.read_from_file(GAPS_OTIO_PATH)
- fd, tmp_aaf_path = tempfile.mkstemp(suffix='.aaf')
- otio.adapters.write_to_file(otio_timeline, tmp_aaf_path)
- self._verify_aaf(tmp_aaf_path)
-
- def test_aaf_writer_simple(self):
- self._verify_aaf(SIMPLE_EXAMPLE_PATH)
-
- def test_aaf_writer_transitions(self):
- self._verify_aaf(TRANSITIONS_EXAMPLE_PATH)
-
- def test_aaf_writer_duplicates(self):
- self._verify_aaf(DUPLICATES_PATH)
-
- def test_aaf_writer_nometadata(self):
- def _target_url_fixup(timeline):
- # fixes up relative paths to be absolute to this test file
- test_dir = os.path.dirname(os.path.abspath(__file__))
- for clip in timeline.find_clips():
- target_url_str = clip.media_reference.target_url
- clip.media_reference.target_url = os.path.join(test_dir, target_url_str)
-
- # Exercise getting Mob IDs from AAF files
- otio_timeline = otio.adapters.read_from_file(NO_METADATA_OTIO_PATH)
- _target_url_fixup(otio_timeline)
- fd, tmp_aaf_path = tempfile.mkstemp(suffix='.aaf')
- otio.adapters.write_to_file(otio_timeline, tmp_aaf_path)
- self._verify_aaf(tmp_aaf_path)
-
-        # Expect an exception to be raised on non-AAF files with no metadata
- otio_timeline = otio.adapters.read_from_file(NOT_AAF_OTIO_PATH)
- _target_url_fixup(otio_timeline)
- fd, tmp_aaf_path = tempfile.mkstemp(suffix='.aaf')
- with self.assertRaises(AAFAdapterError):
- otio.adapters.write_to_file(otio_timeline, tmp_aaf_path)
-
-        # Fall back to generating empty Mob IDs to avoid crashing
- otio_timeline = otio.adapters.read_from_file(NOT_AAF_OTIO_PATH)
- _target_url_fixup(otio_timeline)
- fd, tmp_aaf_path = tempfile.mkstemp(suffix='.aaf')
- otio.adapters.write_to_file(otio_timeline, tmp_aaf_path, use_empty_mob_ids=True)
- self._verify_aaf(tmp_aaf_path)
-
- def test_fail_on_precheck(self):
-        # Expect an exception to be raised on a null available_range
-        # and rate mismatch
- otio_timeline = otio.adapters.read_from_file(PRECHECK_FAIL_OTIO)
- fd, tmp_aaf_path = tempfile.mkstemp(suffix='.aaf')
-        with self.assertRaises(AAFValidationError) as ctx:
-            otio.adapters.write_to_file(otio_timeline, tmp_aaf_path)
-        # Four error messages are raised
-        self.assertEqual(
-            4, len(list(filter(bool, str(ctx.exception).split("\n"))))
-        )
-
- def test_aaf_roundtrip_first_clip(self):
- def _target_url_fixup(timeline):
- # fixes up relative paths to be absolute to this test file
- test_dir = os.path.dirname(os.path.abspath(__file__))
- for clip in timeline.find_clips():
- target_url_str = clip.media_reference.target_url
- clip.media_reference.target_url = os.path.join(test_dir, target_url_str)
-
- # Exercise getting Mob IDs from AAF files
- otio_timeline = otio.adapters.read_from_file(NO_METADATA_OTIO_PATH)
- _target_url_fixup(otio_timeline)
- fd, tmp_aaf_path = tempfile.mkstemp(suffix='.aaf')
- otio.adapters.write_to_file(otio_timeline, tmp_aaf_path)
- self._verify_first_clip(otio_timeline, tmp_aaf_path)
-
- def _verify_first_clip(self, original_timeline, aaf_path):
- timeline_from_aaf = otio.adapters.read_from_file(aaf_path)
-
- original_clips = original_timeline.find_clips()
- aaf_clips = timeline_from_aaf.find_clips()
-
- self.assertTrue(len(original_clips) > 0)
- self.assertEqual(len(aaf_clips), len(original_clips))
-
- first_clip_in_original_timeline = original_clips[0]
- first_clip_in_aaf_timeline = aaf_clips[0]
-
-        # Compare the first clip's range properties between the two timelines
- for prop in ['source_range']:
- self.assertEqual(getattr(first_clip_in_original_timeline, prop),
- getattr(first_clip_in_aaf_timeline, prop),
- f"`{prop}` did not match")
-
- for method in ['visible_range', 'trimmed_range']:
- self.assertEqual(getattr(first_clip_in_original_timeline, method)(),
- getattr(first_clip_in_aaf_timeline, method)(),
- f"`{method}` did not match")
-
- def test_aaf_writer_nesting(self):
- self._verify_aaf(NESTING_EXAMPLE_PATH)
-
- def test_aaf_writer_nested_stack(self):
- self._verify_aaf(NESTED_STACK_EXAMPLE_PATH)
-
- def test_generator_reference(self):
- tl = otio.schema.Timeline()
- cl = otio.schema.Clip()
- cl.source_range = otio.opentime.TimeRange(
- otio.opentime.RationalTime(0, 24),
- otio.opentime.RationalTime(100, 24),
- )
- tl.tracks.append(otio.schema.Track())
- tl.tracks[0].append(cl)
- cl.media_reference = otio.schema.GeneratorReference()
- cl.media_reference.generator_kind = "Slug"
- cl.media_reference.available_range = otio.opentime.TimeRange(
- otio.opentime.RationalTime(0, 24),
- otio.opentime.RationalTime(100, 24),
- )
- _, tmp_aaf_path = tempfile.mkstemp(suffix='.aaf')
-
- mod = otio.adapters.from_name('AAF').module()
-
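-        # "Slug" generators are written out as filler/gap (see
-        # aaf_writer._is_considered_gap); any other generator_kind raises
-        # NotSupportedError, exercised at the end of this test.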
- self.assertTrue(
- mod.aaf_writer._is_considered_gap(cl)
- )
-
- otio.adapters.write_to_file(tl, tmp_aaf_path)
- self._verify_aaf(tmp_aaf_path)
-
- with self.assertRaises(otio.exceptions.NotSupportedError):
- cl.media_reference.generator_kind = "not slug"
- otio.adapters.write_to_file(tl, tmp_aaf_path)
-
- def _verify_aaf(self, aaf_path):
- otio_timeline = otio.adapters.read_from_file(aaf_path, simplify=True)
- fd, tmp_aaf_path = tempfile.mkstemp(suffix='.aaf')
- otio.adapters.write_to_file(otio_timeline, tmp_aaf_path)
-
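-        # The round-tripped file should mirror the source AAF's structure:
-        # one CompositionMob whose slots hold a Sequence directly (picture)
-        # or wrapped in an OperationGroup (sound), as asserted below.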
- with aaf2.open(tmp_aaf_path) as dest, aaf2.open(aaf_path) as orig:
- # Basic number of mobs should be equal
- self.assertEqual(len(list(orig.content.compositionmobs())),
- len(list(dest.content.compositionmobs())))
- self.assertEqual(len(list(orig.content.mastermobs())),
- len(list(dest.content.mastermobs())))
-
- compositionmobs = list(dest.content.compositionmobs())
- self.assertEqual(1, len(compositionmobs))
- compositionmob = compositionmobs[0]
- self.assertEqual(len(otio_timeline.tracks), len(compositionmob.slots))
-
- for otio_track, aaf_timeline_mobslot in zip(otio_timeline.tracks,
- compositionmob.slots):
-
- media_kind = aaf_timeline_mobslot.media_kind.lower()
- self.assertTrue(media_kind in ["picture", "sound"])
- kind_mapping = {
- "picture": otio.schema.TrackKind.Video,
- "sound": otio.schema.TrackKind.Audio
- }
- self.assertEqual(otio_track.kind, kind_mapping[media_kind])
-
- sequence = None
- if media_kind == "picture":
- sequence = aaf_timeline_mobslot.segment
- elif media_kind == "sound":
- opgroup = aaf_timeline_mobslot.segment
- self.assertTrue(isinstance(opgroup, OperationGroup))
- input_segments = opgroup.segments
- self.assertTrue(hasattr(input_segments, "__iter__"))
- self.assertTrue(len(input_segments) >= 1)
- sequence = opgroup.segments[0]
- self.assertTrue(isinstance(sequence, Sequence))
-
- self.assertEqual(
- len(otio_track.find_children(shallow_search=True)),
- len(sequence.components))
- for otio_child, aaf_component in zip(
- otio_track.find_children(shallow_search=True),
- sequence.components):
- type_mapping = {
- otio.schema.Clip: aaf2.components.SourceClip,
- otio.schema.Transition: aaf2.components.Transition,
- otio.schema.Gap: aaf2.components.Filler,
- otio.schema.Stack: aaf2.components.OperationGroup,
- otio.schema.Track: aaf2.components.OperationGroup
- }
- self.assertEqual(type(aaf_component),
- type_mapping[type(otio_child)])
-
- if isinstance(aaf_component, SourceClip):
- self._verify_compositionmob_sourceclip_structure(aaf_component)
-
- if isinstance(aaf_component, aaf2.components.OperationGroup):
- nested_aaf_segments = aaf_component.segments
- for nested_otio_child, nested_aaf_segment in zip(
- otio_child.find_children(), nested_aaf_segments):
- self._is_otio_aaf_same(nested_otio_child,
- nested_aaf_segment)
- else:
- self._is_otio_aaf_same(otio_child, aaf_component)
-
- # Inspect the OTIO -> AAF -> OTIO file
- roundtripped_otio = otio.adapters.read_from_file(tmp_aaf_path, simplify=True)
-
- self.assertIsNotNone(roundtripped_otio)
- self.assertTrue(isinstance(roundtripped_otio, otio.schema.Timeline))
- self.assertEqual(otio_timeline.name, roundtripped_otio.name)
- self.assertEqual(otio_timeline.duration().rate,
- roundtripped_otio.duration().rate)
-
- def _verify_compositionmob_sourceclip_structure(self, compmob_clip):
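-        # Expected mob reference chain, asserted below:
-        #   CompositionMob SourceClip -> MasterMob -> file SourceMob ->
-        #   tape SourceMob (with a single Timecode slot) -> terminating
-        #   SourceClip with a null mob reference.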
- self.assertTrue(isinstance(compmob_clip, SourceClip))
- self.assertTrue(isinstance(compmob_clip.mob, MasterMob))
- mastermob = compmob_clip.mob
- for mastermob_slot in mastermob.slots:
- mastermob_clip = mastermob_slot.segment
- self.assertTrue(isinstance(mastermob_clip, SourceClip))
- self.assertTrue(isinstance(mastermob_clip.mob, SourceMob))
- filemob = mastermob_clip.mob
-
- self.assertEqual(1, len(filemob.slots))
- filemob_clip = filemob.slots[0].segment
-
- self.assertTrue(isinstance(filemob_clip, SourceClip))
- self.assertTrue(isinstance(filemob_clip.mob, SourceMob))
- tapemob = filemob_clip.mob
- self.assertTrue(len(tapemob.slots) >= 2)
-
- timecode_slots = [tape_slot for tape_slot in tapemob.slots
- if isinstance(tape_slot.segment,
- Timecode)]
-
- self.assertEqual(1, len(timecode_slots))
-
- for tape_slot in tapemob.slots:
- tapemob_component = tape_slot.segment
- if not isinstance(tapemob_component, Timecode):
- self.assertTrue(isinstance(tapemob_component, SourceClip))
- tapemob_clip = tapemob_component
- self.assertEqual(None, tapemob_clip.mob)
- self.assertEqual(None, tapemob_clip.slot)
- self.assertEqual(0, tapemob_clip.slot_id)
-
- def _is_otio_aaf_same(self, otio_child, aaf_component):
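-        # Compare mob IDs for SourceClips, visible durations for SourceClips
-        # and Fillers, and keyframe point lists for Transitions.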
- if isinstance(aaf_component, SourceClip):
- orig_mob_id = str(otio_child.metadata["AAF"]["SourceID"])
- dest_mob_id = str(aaf_component.mob.mob_id)
- self.assertEqual(orig_mob_id, dest_mob_id)
-
- if isinstance(aaf_component, (SourceClip, Filler)):
- orig_duration = otio_child.visible_range().duration.value
- dest_duration = aaf_component.length
- self.assertEqual(orig_duration, dest_duration)
-
- if isinstance(aaf_component, Transition):
- orig_pointlist = otio_child.metadata["AAF"]["PointList"]
- params = aaf_component["OperationGroup"].value.parameters
- varying_value = [param for param in params if isinstance(param,
- VaryingValue)][0]
- dest_pointlist = varying_value.getvalue("PointList")
- for orig_point, dest_point in zip(orig_pointlist, dest_pointlist):
- self.assertEqual(orig_point["Value"], dest_point.value)
- self.assertEqual(orig_point["Time"], dest_point.time)
-
-
-class SimplifyTests(unittest.TestCase):
- def test_aaf_simplify(self):
- aaf_path = SIMPLE_EXAMPLE_PATH
- timeline = otio.adapters.read_from_file(aaf_path, simplify=True)
- self.assertIsNotNone(timeline)
- self.assertEqual(type(timeline), otio.schema.Timeline)
- self.assertEqual(timeline.name, "OTIO TEST 1.Exported.01")
- fps = timeline.duration().rate
- self.assertEqual(fps, 24.0)
- self.assertEqual(
- timeline.duration(),
- otio.opentime.from_timecode("00:02:16:18", fps)
- )
- self.assertEqual(len(timeline.tracks), 3)
- self.assertEqual(otio.schema.TrackKind.Video, timeline.tracks[0].kind)
- self.assertEqual(otio.schema.TrackKind.Audio, timeline.tracks[1].kind)
- self.assertEqual(otio.schema.TrackKind.Audio, timeline.tracks[2].kind)
- for track in timeline.tracks:
- self.assertNotEqual(type(track[0]), otio.schema.Track)
- self.assertEqual(len(track), 5)
-
- def test_aaf_no_simplify(self):
- aaf_path = SIMPLE_EXAMPLE_PATH
- collection = otio.adapters.read_from_file(aaf_path, simplify=False)
- self.assertIsNotNone(collection)
- self.assertEqual(type(collection), otio.schema.SerializableCollection)
- self.assertEqual(len(collection), 1)
-
- timeline = collection[0]
- self.assertEqual(timeline.name, "OTIO TEST 1.Exported.01")
- fps = timeline.duration().rate
- self.assertEqual(fps, 24.0)
- self.assertEqual(
- timeline.duration(),
- otio.opentime.from_timecode("00:02:16:18", fps)
- )
-
- self.assertEqual(len(timeline.tracks), 12)
-
- video_track = timeline.tracks[8][0]
- self.assertEqual(otio.schema.TrackKind.Video, video_track.kind)
- self.assertEqual(len(video_track), 5)
-
- def test_simplify_top_level_track(self):
- """Test for cases where a track has a single item but should not be
- collapsed because it is the the last track in the stack ie:
-
- TL
- tracks Stack
- track1
- clip
-
- in this case, track1 should not be pruned.
- """
-
- # get the simplified form of the clip
- tl = otio.adapters.read_from_file(ONE_AUDIO_CLIP_PATH, simplify=True)
-
- # ensure that we end up with a track that contains a clip
- self.assertEqual(type(tl.tracks[0]), otio.schema.Track)
- self.assertEqual(tl.tracks[0].kind, otio.schema.TrackKind.Audio)
- self.assertEqual(type(tl.tracks[0][0]), otio.schema.Clip)
-
- def test_simplify_track_stack_track(self):
- tl = otio.schema.Timeline()
- tl.tracks.append(otio.schema.Track())
- tl.tracks[0].append(otio.schema.Stack())
- tl.tracks[0][0].append(otio.schema.Track())
- tl.tracks[0][0][0].append(otio.schema.Clip())
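-        # structure: tracks -> Track -> Stack -> Track -> Clip, which should
-        # collapse so the remaining top-level Track holds the Clip directly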
-
- from opentimelineio_contrib.adapters import advanced_authoring_format
- simple_tl = advanced_authoring_format._simplify(tl)
-
- self.assertEqual(
- type(simple_tl.tracks[0][0]), otio.schema.Clip
- )
-
- tl = otio.schema.Timeline()
- tl.tracks.append(otio.schema.Track())
- tl.tracks[0].append(otio.schema.Stack())
- tl.tracks[0][0].append(otio.schema.Track())
- tl.tracks[0][0][0].append(otio.schema.Track())
- tl.tracks[0][0][0][0].append(otio.schema.Clip())
-
- from opentimelineio_contrib.adapters import advanced_authoring_format
- simple_tl = advanced_authoring_format._simplify(tl)
-
- # top level thing should not be a clip
- self.assertEqual(
- type(simple_tl.tracks[0]), otio.schema.Track
- )
- self.assertEqual(
- type(simple_tl.tracks[0][0]), otio.schema.Clip
- )
-
- def test_simplify_stack_clip_clip(self):
- tl = otio.schema.Timeline()
- tl.tracks.append(otio.schema.Track())
- tl.tracks[0].append(otio.schema.Stack())
- tl.tracks[0][0].append(otio.schema.Clip())
- tl.tracks[0][0].append(otio.schema.Clip())
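-        # A Stack holding more than one Clip cannot be pruned; it should
-        # survive as an item inside the top-level track.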
-
- from opentimelineio_contrib.adapters import advanced_authoring_format
- simple_tl = advanced_authoring_format._simplify(tl)
-
- self.assertNotEqual(
- type(simple_tl.tracks[0]), otio.schema.Clip
- )
- self.assertEqual(
- type(simple_tl.tracks[0][0]), otio.schema.Stack
- )
-
- def test_simplify_stack_track_clip(self):
- tl = otio.schema.Timeline()
- tl.tracks.append(otio.schema.Track())
- tl.tracks[0].append(otio.schema.Stack())
- tl.tracks[0][0].append(otio.schema.Track())
- tl.tracks[0][0][0].append(otio.schema.Clip())
- tl.tracks[0][0].append(otio.schema.Track())
- tl.tracks[0][0][1].append(otio.schema.Clip())
-
- from opentimelineio_contrib.adapters import advanced_authoring_format
- simple_tl = advanced_authoring_format._simplify(tl)
-
- # None of the things in the top level stack should be a clip
- for i in simple_tl.tracks:
- self.assertNotEqual(type(i), otio.schema.Clip)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/docs/tutorials/otio-plugins.md b/docs/tutorials/otio-plugins.md
index 66b2327340..7a81ee4ce8 100644
--- a/docs/tutorials/otio-plugins.md
+++ b/docs/tutorials/otio-plugins.md
@@ -282,51 +282,6 @@ Adapter plugins convert to and from OpenTimelineIO.
[Tutorial on how to write an adapter](write-an-adapter).
-### AAF
-
-```
-OpenTimelineIO Advanced Authoring Format (AAF) Adapter
-
-Depending on whether and where PyAAF is installed, you may need to set this env var:
- OTIO_AAF_PYTHON_LIB - should point at the PyAAF module.
-```
-
-*source*: `opentimelineio_contrib/adapters/advanced_authoring_format.py`
-
-
-*Supported Features (with arguments)*:
-
-- read_from_file:
-```
-Reads AAF content from `filepath` and outputs an OTIO
- timeline object.
-
- Args:
- filepath (str): AAF filepath
- simplify (bool, optional): simplify timeline structure by stripping empty
- items
- transcribe_log (bool, optional): log activity as items are getting
- transcribed
- attach_markers (bool, optional): attaches markers to their appropriate items
- like clip, gap. etc on the track
- bake_keyframed_properties (bool, optional): bakes animated property values
- for each frame in a source clip
- Returns:
- otio.schema.Timeline
-```
- - filepath
- - simplify
- - transcribe_log
- - attach_markers
- - bake_keyframed_properties
-- write_to_file:
- - input_otio
- - filepath
-
-
-
-
-
### ale
```
diff --git a/setup.py b/setup.py
index c3cf6c48d1..51b2ae712e 100644
--- a/setup.py
+++ b/setup.py
@@ -360,7 +360,6 @@ def run(self):
python_requires='>=3.7, !=3.9.0', # noqa: E501
install_requires=[
- 'pyaaf2>=1.4,<1.7',
'importlib_metadata>=1.4; python_version < "3.8"',
],
entry_points={