Last active
August 29, 2015 14:02
-
-
Save jernoble/50752b97d73c76673e5e to your computer and use it in GitHub Desktop.
Implements the 'Coded Frame Processing' algorithm of the Media Source Extension spec.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/* Copyright (c) 2014 Jer Noble | |
* | |
* Permission is hereby granted, free of charge, to any person obtaining a copy | |
* of this software and associated documentation files (the "Software"), to deal | |
* in the Software without restriction, including without limitation the rights | |
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
* copies of the Software, and to permit persons to whom the Software is | |
* furnished to do so, subject to the following conditions: | |
* | |
* The above copyright notice and this permission notice shall be included in | |
* all copies or substantial portions of the Software. | |
* | |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
* THE SOFTWARE. | |
*/ | |
#include <algorithm> | |
#include <cmath> | |
#include <iostream> | |
#include <limits> | |
#include <map> | |
#include <stdio.h> | |
#include <vector> | |
using namespace std; | |
// Produce the sentinel used for "no value": a quiet NaN,
// which the isnan()-based helpers below can detect.
static double unset()
{
    return std::numeric_limits<double>::quiet_NaN();
}
// True when `value` holds a real number rather than the NaN "unset" sentinel.
static bool isSet(const double& value)
{
    return !std::isnan(value);
}
// True when `value` holds the NaN "unset" sentinel.
static bool isUnset(const double& value)
{
    return std::isnan(value);
}
// One parsed media sample. Timestamps and duration are in seconds;
// `isRandomAccess` marks sync samples (key frames); `generation` tags
// which append pass produced the frame (used only by the test driver).
class CodedFrame {
public:
    CodedFrame(double pts, double dts, double frameDuration, bool randomAccess, unsigned appendGeneration)
        : presentationTimestamp(pts)
        , decodeTimestamp(dts)
        , duration(frameDuration)
        , isRandomAccess(randomAccess)
        , generation(appendGeneration)
    {
    }

    double presentationTimestamp;
    double decodeTimestamp;
    double duration;
    bool isRandomAccess;
    unsigned generation;
};
typedef multimap<double, CodedFrame> CodedFrameMap; | |
static bool codedFrameIsRandomAccess(CodedFrameMap::value_type pair) | |
{ | |
return pair.second.isRandomAccess; | |
} | |
class TrackBuffer { | |
public: | |
TrackBuffer() | |
: lastDecodeTimestamp(unset()) | |
, lastFrameDuration(unset()) | |
, highestPresentationTimestamp(unset()) | |
, needsRandomAccessFlag(true) | |
{ | |
} | |
double lastDecodeTimestamp; | |
double lastFrameDuration; | |
double highestPresentationTimestamp; | |
double needsRandomAccessFlag; | |
CodedFrameMap codedFrames; // in decode order | |
}; | |
class SourceBuffer { | |
public: | |
SourceBuffer() | |
: presentationStartTime(0) | |
{ | |
} | |
enum ErrorCode { NoError, NetworkError, DecodeError }; | |
ErrorCode processCodedFrames(vector<CodedFrame> codedFrames); | |
TrackBuffer trackBuffer; | |
double presentationStartTime; | |
}; | |
// Implements the Media Source Extensions "Coded Frame Processing" algorithm
// for a single (video) track buffer. Spec step numbers are kept as comments;
// steps involving modes, timestampOffset, append windows, audio/text splicing
// and readyState are intentionally ignored (see IGNORE/ASSUME notes).
// Returns DecodeError where the spec would run the end-of-stream algorithm
// with error = "decode"; NoError otherwise.
//
// FIXES vs. the original: two dead `replacementCodedFrames` locals removed;
// step 14's redundant any_of + find_if double scan collapsed into one find_if;
// the overlap lambda takes its map entry by const reference (no pair copy).
SourceBuffer::ErrorCode SourceBuffer::processCodedFrames(vector<CodedFrame> codedFrames)
{
    // When complete coded frames have been parsed by the segment parser loop then the following steps are run:
    // 1. For each coded frame in the media segment run the following steps:
    for (auto& codedFrame : codedFrames) {
        // 1. Loop top:
    loopTop:
        // ↳ If generate timestamps flag equals true: let presentation timestamp
        //   and decode timestamp equal 0. (No-op here.)
        // ↳ Otherwise:
        //   1. Let presentation timestamp be a double precision floating point
        //      representation of the coded frame's presentation timestamp in seconds.
        double presentationTimestamp = codedFrame.presentationTimestamp;
        //   2. Let decode timestamp be a double precision floating point
        //      representation of the coded frame's decode timestamp in seconds.
        double decodeTimestamp = codedFrame.decodeTimestamp;
        // 2. Let frame duration be a double precision floating point
        //    representation of the coded frame's duration in seconds.
        double frameDuration = codedFrame.duration;
        // 3. If mode equals "sequence" and group start timestamp is set: IGNORE (mode="sequence").
        // 4. If timestampOffset is not 0: IGNORE (timestampOffset).
        // 5. Let track buffer equal the track buffer that the coded frame will be
        //    added to. ASSUME: a single track buffer, trackBuffer.
        // 6. ↳ If last decode timestamp for track buffer is set and decode
        //      timestamp is less than last decode timestamp:
        if ((isSet(trackBuffer.lastDecodeTimestamp) && decodeTimestamp < trackBuffer.lastDecodeTimestamp)
            // OR
            // ↳ If last decode timestamp for track buffer is set and the difference
            //   between decode timestamp and last decode timestamp is greater than
            //   2 times last frame duration:
            || (isSet(trackBuffer.lastDecodeTimestamp) && (abs(decodeTimestamp - trackBuffer.lastDecodeTimestamp) > (2 * trackBuffer.lastFrameDuration)))) {
            // 1. ↳ If mode equals "segments" / "sequence": IGNORE (group timestamps).
            // 2. Unset the last decode timestamp on all track buffers.
            trackBuffer.lastDecodeTimestamp = unset();
            // 3. Unset the last frame duration on all track buffers.
            trackBuffer.lastFrameDuration = unset();
            // 4. Unset the highest presentation timestamp on all track buffers.
            trackBuffer.highestPresentationTimestamp = unset();
            // 5. Set the need random access point flag on all track buffers to true.
            trackBuffer.needsRandomAccessFlag = true;
            // 6. Jump to the Loop Top step above to restart processing of the
            //    current coded frame. (goto matches the spec's control flow.)
            goto loopTop;
        }
        // ↳ Otherwise: continue.
        // 7. If the presentation timestamp or decode timestamp is less than the
        //    presentation start time, then run the end of stream algorithm with
        //    the error parameter set to "decode", and abort these steps.
        if (presentationTimestamp < presentationStartTime || decodeTimestamp < presentationStartTime)
            return DecodeError;
        // 8. Let frame end timestamp equal the sum of presentation timestamp and frame duration.
        double frameEndTimestamp = presentationTimestamp + frameDuration;
        // 9./10. Append-window clipping: IGNORE (append windows).
        // 11. If the need random access point flag on track buffer equals true:
        if (trackBuffer.needsRandomAccessFlag) {
            // 1. If the coded frame is not a random access point, then drop it
            //    and jump to the top of the loop for the next coded frame.
            if (!codedFrame.isRandomAccess)
                continue;
            // 2. Set the need random access point flag on track buffer to false.
            trackBuffer.needsRandomAccessFlag = false;
        }
        // 12./13. Spliced audio / timed text frames: IGNORE (splicing).
        // 14. If last decode timestamp for track buffer is unset and presentation
        //     timestamp falls within the presentation interval of a coded frame
        //     in track buffer, then run the following steps:
        auto timeFallsWithinPresentationInterval = [presentationTimestamp](const CodedFrameMap::value_type& pair) -> bool {
            const CodedFrame& existingFrame = pair.second;
            // The presentation interval of a coded frame is [pts, pts + duration):
            // the start of the range is inclusive, the end is exclusive.
            return presentationTimestamp >= existingFrame.presentationTimestamp
                && presentationTimestamp < existingFrame.presentationTimestamp + existingFrame.duration;
        };
        if (isUnset(trackBuffer.lastDecodeTimestamp)) {
            // 1. Let overlapped frame be the coded frame in track buffer that
            //    matches the condition above (single scan; end() means none).
            auto overlappedFrameIter = find_if(trackBuffer.codedFrames.begin(), trackBuffer.codedFrames.end(), timeFallsWithinPresentationInterval);
            if (overlappedFrameIter != trackBuffer.codedFrames.end()) {
                CodedFrame& overlappedFrame = overlappedFrameIter->second;
                // 2. ↳ audio / timed text / video branches. ASSUME: video frames.
                // 1. Let overlapped frame presentation timestamp equal the
                //    presentation timestamp of overlapped frame.
                double overlappedFramePresentationTimestamp = overlappedFrame.presentationTimestamp;
                // 2. Let remove window timestamp equal overlapped frame
                //    presentation timestamp plus 1 microsecond.
                double removeWindowTimestamp = overlappedFramePresentationTimestamp + 0.000001;
                // 3. If the presentation timestamp is less than the remove window
                //    timestamp, then remove overlapped frame and any coded frames
                //    that depend on it from track buffer.
                if (presentationTimestamp < removeWindowTimestamp) {
                    auto nextSyncSampleIter = overlappedFrameIter;
                    nextSyncSampleIter = find_if(++nextSyncSampleIter, trackBuffer.codedFrames.end(), codedFrameIsRandomAccess);
                    // NOTE: this removes frames in decode order.
                    trackBuffer.codedFrames.erase(overlappedFrameIter, nextSyncSampleIter);
                }
            }
        }
        CodedFrameMap removedCodedFrames;
        // 15. Remove existing coded frames in track buffer:
        // ↳ If highest presentation timestamp for track buffer is not set:
        if (isUnset(trackBuffer.highestPresentationTimestamp)) {
            // Remove all coded frames from track buffer that have a presentation
            // timestamp >= presentation timestamp and < frame end timestamp.
            for (const auto& pair : trackBuffer.codedFrames) {
                const CodedFrame& existingFrame = pair.second;
                if (existingFrame.presentationTimestamp >= presentationTimestamp && existingFrame.presentationTimestamp < frameEndTimestamp)
                    removedCodedFrames.insert(pair);
            }
            // Frames are actually removed in step 16.
        }
        // ↳ If highest presentation timestamp for track buffer is set and less
        //   than presentation timestamp:
        if (isSet(trackBuffer.highestPresentationTimestamp) && trackBuffer.highestPresentationTimestamp < presentationTimestamp) {
            // Remove all coded frames from track buffer that have a presentation
            // timestamp > highest presentation timestamp and <= frame end timestamp.
            for (const auto& pair : trackBuffer.codedFrames) {
                const CodedFrame& existingFrame = pair.second;
                if (existingFrame.presentationTimestamp > trackBuffer.highestPresentationTimestamp && existingFrame.presentationTimestamp <= frameEndTimestamp)
                    removedCodedFrames.insert(pair);
            }
            // Frames are actually removed in step 16.
        }
        // 16. Remove decoding dependencies of the coded frames removed in the previous step:
        // ↳ If detailed information about decoding dependencies is available:
        //   ASSUME: detailed dependency information is unavailable.
        // ↳ Otherwise: remove all coded frames between the coded frames removed
        //   in the previous step and the next random access point after them.
        for (auto& pair : removedCodedFrames) {
            double removedCodedFrameDecodeTimestamp = pair.first;
            auto removedFrameIter = trackBuffer.codedFrames.find(removedCodedFrameDecodeTimestamp);
            // Coded frame may have been removed in a previous step.
            if (removedFrameIter == trackBuffer.codedFrames.end())
                continue;
            auto nextSyncSampleIter = removedFrameIter;
            nextSyncSampleIter = find_if(++nextSyncSampleIter, trackBuffer.codedFrames.end(), codedFrameIsRandomAccess);
            // NOTE: this removes frames in decode order.
            trackBuffer.codedFrames.erase(removedFrameIter, nextSyncSampleIter);
        }
        // 17. ↳ Spliced audio / timed text frames: IGNORE (splicing).
        // ↳ Otherwise: add the coded frame with the presentation timestamp,
        //   decode timestamp, and frame duration to the track buffer.
        trackBuffer.codedFrames.emplace(decodeTimestamp, codedFrame);
        // 18. Set last decode timestamp for track buffer to decode timestamp.
        trackBuffer.lastDecodeTimestamp = decodeTimestamp;
        // 19. Set last frame duration for track buffer to frame duration.
        trackBuffer.lastFrameDuration = frameDuration;
        // 20. If highest presentation timestamp for track buffer is unset or
        //     frame end timestamp is greater than it, update it.
        if (isUnset(trackBuffer.highestPresentationTimestamp) || frameEndTimestamp > trackBuffer.highestPresentationTimestamp)
            trackBuffer.highestPresentationTimestamp = frameEndTimestamp;
        // 21. Group end timestamp update: IGNORE (group end timestamp).
        // 22. Generate-timestamps timestampOffset update: IGNORE.
    }
    // 2.-5. HTMLMediaElement readyState / duration-change follow-ups:
    // IGNORE (ready state changes).
    return NoError;
}
// Pretty-prints one coded frame, e.g.
// "{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(1) }".
// FIX: take the frame by const reference — printing never mutates it,
// and a const& also binds const frames and temporaries.
ostream& operator<<(ostream& stream, const CodedFrame& sample)
{
    return stream
        << "{"
        << " PTS(" << sample.presentationTimestamp << ")"
        << " DTS(" << sample.decodeTimestamp << ")"
        << " duration(" << sample.duration << ")"
        << " isRandomAccess(" << sample.isRandomAccess << ")"
        << " generation(" << sample.generation << ")"
        << " }";
}
// Prints every frame in the map, one per line, in decode (key) order.
// (Non-const ref is required: the CodedFrame stream operator takes CodedFrame&.)
ostream& operator<<(ostream& stream, CodedFrameMap& map)
{
    for (auto it = map.begin(); it != map.end(); ++it)
        stream << it->second << endl;
    return stream;
}
int main(void) | |
{ | |
SourceBuffer sourceBuffer; | |
sourceBuffer.processCodedFrames({ | |
CodedFrame(0, 0, 1, true, 1), | |
CodedFrame(1, 1, 1, false, 1), | |
CodedFrame(2, 2, 1, false, 1), | |
CodedFrame(3, 3, 1, true, 1), | |
CodedFrame(4, 4, 1, false, 1), | |
CodedFrame(5, 5, 1, false, 1), | |
}); | |
cout << "Round 1: initial state - length:" << sourceBuffer.trackBuffer.codedFrames.size() << endl | |
<< sourceBuffer.trackBuffer.codedFrames | |
<< endl; | |
sourceBuffer.processCodedFrames({ | |
CodedFrame(0, 0, 1, true, 2), | |
CodedFrame(1, 1, 1, false, 2), | |
CodedFrame(2, 2, 1, false, 2), | |
CodedFrame(3, 3, 1, true, 2), | |
CodedFrame(4, 4, 1, false, 2), | |
CodedFrame(5, 5, 1, false, 2), | |
}); | |
cout << "Round 2: ordered P frames - length:" << sourceBuffer.trackBuffer.codedFrames.size() << endl | |
<< sourceBuffer.trackBuffer.codedFrames | |
<< endl; | |
sourceBuffer.processCodedFrames({ | |
CodedFrame(0, 0, 1, true, 3), | |
CodedFrame(2, 1, 1, false, 3), | |
CodedFrame(1, 2, 1, false, 3), | |
CodedFrame(3, 3, 1, true, 3), | |
CodedFrame(5, 4, 1, false, 3), | |
CodedFrame(4, 5, 1, false, 3), | |
}); | |
cout << "Round 3: unordered P frames - length:" << sourceBuffer.trackBuffer.codedFrames.size() << endl | |
<< sourceBuffer.trackBuffer.codedFrames | |
<< endl; | |
sourceBuffer.processCodedFrames({ | |
CodedFrame(0, 0, 1, true, 4), | |
CodedFrame(2, 1, 1, false, 4), | |
CodedFrame(1, 2, 1, false, 4), | |
CodedFrame(3, 3, 1, true, 4), | |
CodedFrame(5, 4, 1, false, 4), | |
CodedFrame(4, 5, 1, false, 4), | |
}); | |
cout << "Round 4: unordered P frames - length:" << sourceBuffer.trackBuffer.codedFrames.size() << endl | |
<< sourceBuffer.trackBuffer.codedFrames | |
<< endl; | |
sourceBuffer.processCodedFrames({ | |
CodedFrame(2, 0, 1, true, 5), | |
CodedFrame(0, 1, 1, false, 5), | |
CodedFrame(1, 2, 1, false, 5), | |
CodedFrame(5, 3, 1, true, 5), | |
CodedFrame(3, 4, 1, false, 5), | |
CodedFrame(4, 5, 1, false, 5), | |
}); | |
cout << "Round 5: ordered B frames - length:" << sourceBuffer.trackBuffer.codedFrames.size() << endl | |
<< sourceBuffer.trackBuffer.codedFrames | |
<< endl; | |
sourceBuffer.trackBuffer.codedFrames.clear(); | |
sourceBuffer.processCodedFrames({ | |
CodedFrame(0, 0, 1, true, 1), | |
CodedFrame(1, 1, 1, false, 1), | |
CodedFrame(2, 2, 1, false, 1), | |
CodedFrame(3, 3, 1, true, 1), | |
CodedFrame(4, 4, 1, false, 1), | |
CodedFrame(5, 5, 1, false, 1), | |
}); | |
sourceBuffer.processCodedFrames({ | |
CodedFrame(0.5, 0.5, 1, true, 6), | |
CodedFrame(1.5, 1.5, 1, false, 6), | |
CodedFrame(2.5, 2.5, 1, false, 6), | |
CodedFrame(3.5, 3.5, 1, true, 6), | |
CodedFrame(4.5, 4.5, 1, false, 6), | |
CodedFrame(5.5, 5.5, 1, false, 6), | |
}); | |
cout << "Round 6: ordered unaligned P frames - length:" << sourceBuffer.trackBuffer.codedFrames.size() << endl | |
<< sourceBuffer.trackBuffer.codedFrames | |
<< endl; | |
} |
Round 1: initial state - length:6
{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(1) }
{ PTS(1) DTS(1) duration(1) isRandomAccess(0) generation(1) }
{ PTS(2) DTS(2) duration(1) isRandomAccess(0) generation(1) }
{ PTS(3) DTS(3) duration(1) isRandomAccess(1) generation(1) }
{ PTS(4) DTS(4) duration(1) isRandomAccess(0) generation(1) }
{ PTS(5) DTS(5) duration(1) isRandomAccess(0) generation(1) }
Round 2: ordered P frames - length:9
{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(2) }
{ PTS(1) DTS(1) duration(1) isRandomAccess(0) generation(2) }
{ PTS(2) DTS(2) duration(1) isRandomAccess(0) generation(2) }
{ PTS(3) DTS(3) duration(1) isRandomAccess(1) generation(1) }
{ PTS(3) DTS(3) duration(1) isRandomAccess(1) generation(2) }
{ PTS(4) DTS(4) duration(1) isRandomAccess(0) generation(1) }
{ PTS(4) DTS(4) duration(1) isRandomAccess(0) generation(2) }
{ PTS(5) DTS(5) duration(1) isRandomAccess(0) generation(1) }
{ PTS(5) DTS(5) duration(1) isRandomAccess(0) generation(2) }
Round 3: unordered P frames - length:6
{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(3) }
{ PTS(2) DTS(1) duration(1) isRandomAccess(0) generation(3) }
{ PTS(1) DTS(2) duration(1) isRandomAccess(0) generation(3) }
{ PTS(3) DTS(3) duration(1) isRandomAccess(1) generation(3) }
{ PTS(5) DTS(4) duration(1) isRandomAccess(0) generation(3) }
{ PTS(4) DTS(5) duration(1) isRandomAccess(0) generation(3) }
Round 4: unordered P frames - length:6
{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(4) }
{ PTS(2) DTS(1) duration(1) isRandomAccess(0) generation(4) }
{ PTS(1) DTS(2) duration(1) isRandomAccess(0) generation(4) }
{ PTS(3) DTS(3) duration(1) isRandomAccess(1) generation(4) }
{ PTS(5) DTS(4) duration(1) isRandomAccess(0) generation(4) }
{ PTS(4) DTS(5) duration(1) isRandomAccess(0) generation(4) }
Round 5: ordered B frames - length:8
{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(4) }
{ PTS(2) DTS(0) duration(1) isRandomAccess(1) generation(5) }
{ PTS(0) DTS(1) duration(1) isRandomAccess(0) generation(5) }
{ PTS(1) DTS(2) duration(1) isRandomAccess(0) generation(5) }
{ PTS(3) DTS(3) duration(1) isRandomAccess(1) generation(4) }
{ PTS(5) DTS(3) duration(1) isRandomAccess(1) generation(5) }
{ PTS(3) DTS(4) duration(1) isRandomAccess(0) generation(5) }
{ PTS(4) DTS(5) duration(1) isRandomAccess(0) generation(5) }
Round 6: ordered unaligned P frames - length:10
{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(1) }
{ PTS(0.5) DTS(0.5) duration(1) isRandomAccess(1) generation(6) }
{ PTS(1.5) DTS(1.5) duration(1) isRandomAccess(0) generation(6) }
{ PTS(2.5) DTS(2.5) duration(1) isRandomAccess(0) generation(6) }
{ PTS(3) DTS(3) duration(1) isRandomAccess(1) generation(1) }
{ PTS(3.5) DTS(3.5) duration(1) isRandomAccess(1) generation(6) }
{ PTS(4) DTS(4) duration(1) isRandomAccess(0) generation(1) }
{ PTS(4.5) DTS(4.5) duration(1) isRandomAccess(0) generation(6) }
{ PTS(5) DTS(5) duration(1) isRandomAccess(0) generation(1) }
{ PTS(5.5) DTS(5.5) duration(1) isRandomAccess(0) generation(6) }
If §15 part 2 is changed to read:
If highest presentation timestamp for track buffer is set and less than or equal to presentation timestamp
Remove all coded frames from track buffer that have a presentation timestamp greater than or equal to highest presentation timestamp and less than or equal to frame end timestamp.
Then the results for the same tests are as follows:
Round 1: initial state - length:6
{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(1) }
{ PTS(1) DTS(1) duration(1) isRandomAccess(0) generation(1) }
{ PTS(2) DTS(2) duration(1) isRandomAccess(0) generation(1) }
{ PTS(3) DTS(3) duration(1) isRandomAccess(1) generation(1) }
{ PTS(4) DTS(4) duration(1) isRandomAccess(0) generation(1) }
{ PTS(5) DTS(5) duration(1) isRandomAccess(0) generation(1) }
Round 2: ordered P frames - length:6
{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(2) }
{ PTS(1) DTS(1) duration(1) isRandomAccess(0) generation(2) }
{ PTS(2) DTS(2) duration(1) isRandomAccess(0) generation(2) }
{ PTS(3) DTS(3) duration(1) isRandomAccess(1) generation(2) }
{ PTS(4) DTS(4) duration(1) isRandomAccess(0) generation(2) }
{ PTS(5) DTS(5) duration(1) isRandomAccess(0) generation(2) }
Round 3: unordered P frames - length:6
{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(3) }
{ PTS(2) DTS(1) duration(1) isRandomAccess(0) generation(3) }
{ PTS(1) DTS(2) duration(1) isRandomAccess(0) generation(3) }
{ PTS(3) DTS(3) duration(1) isRandomAccess(1) generation(3) }
{ PTS(5) DTS(4) duration(1) isRandomAccess(0) generation(3) }
{ PTS(4) DTS(5) duration(1) isRandomAccess(0) generation(3) }
Round 4: unordered P frames - length:6
{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(4) }
{ PTS(2) DTS(1) duration(1) isRandomAccess(0) generation(4) }
{ PTS(1) DTS(2) duration(1) isRandomAccess(0) generation(4) }
{ PTS(3) DTS(3) duration(1) isRandomAccess(1) generation(4) }
{ PTS(5) DTS(4) duration(1) isRandomAccess(0) generation(4) }
{ PTS(4) DTS(5) duration(1) isRandomAccess(0) generation(4) }
Round 5: ordered B frames - length:7
{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(4) }
{ PTS(2) DTS(0) duration(1) isRandomAccess(1) generation(5) }
{ PTS(0) DTS(1) duration(1) isRandomAccess(0) generation(5) }
{ PTS(1) DTS(2) duration(1) isRandomAccess(0) generation(5) }
{ PTS(5) DTS(3) duration(1) isRandomAccess(1) generation(5) }
{ PTS(3) DTS(4) duration(1) isRandomAccess(0) generation(5) }
{ PTS(4) DTS(5) duration(1) isRandomAccess(0) generation(5) }
Round 6: ordered unaligned P frames - length:7
{ PTS(0) DTS(0) duration(1) isRandomAccess(1) generation(1) }
{ PTS(0.5) DTS(0.5) duration(1) isRandomAccess(1) generation(6) }
{ PTS(1.5) DTS(1.5) duration(1) isRandomAccess(0) generation(6) }
{ PTS(2.5) DTS(2.5) duration(1) isRandomAccess(0) generation(6) }
{ PTS(3.5) DTS(3.5) duration(1) isRandomAccess(1) generation(6) }
{ PTS(4.5) DTS(4.5) duration(1) isRandomAccess(0) generation(6) }
{ PTS(5.5) DTS(5.5) duration(1) isRandomAccess(0) generation(6) }
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Compile with: `c++ -std=c++11 coded-frame-processing.cpp` (the file uses range-for, lambdas, and emplace, so C++11 or later is required).