// Copyright 2025 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/platform/image-decoders/jxl/jxl_image_decoder.h"

#include <array>

#include "base/containers/span.h"
#include "base/logging.h"
#include "base/numerics/byte_conversions.h"
#include "base/numerics/checked_math.h"
#include "base/time/time.h"
#include "third_party/blink/renderer/platform/image-decoders/fast_shared_buffer_reader.h"
#include "third_party/skia/include/core/SkColorSpace.h"
#include "third_party/skia/include/core/SkTypes.h"

namespace blink {

namespace {

// The maximum JXL file size we are willing to decode. This helps prevent
// resource exhaustion from malicious files. Matches AVIF decoder limit.
constexpr uint64_t kMaxJxlFileSize = 0x10000000;  // 256 MB

}  // namespace

JXLImageDecoder::JXLImageDecoder(AlphaOption alpha_option,
                                 HighBitDepthDecodingOption hbd_option,
                                 ColorBehavior color_behavior,
                                 cc::AuxImage aux_image,
                                 wtf_size_t max_decoded_bytes,
                                 AnimationOption animation_option)
    : ImageDecoder(alpha_option,
                   hbd_option,
                   color_behavior,
                   aux_image,
                   max_decoded_bytes) {
  basic_info_ = {};
  basic_info_.have_animation = false;
}

JXLImageDecoder::~JXLImageDecoder() = default;

String JXLImageDecoder::FilenameExtension() const {
  return "jxl";
}

const AtomicString& JXLImageDecoder::MimeType() const {
  DEFINE_STATIC_LOCAL(const AtomicString, jxl_mime_type, ("image/jxl"));
  return jxl_mime_type;
}

bool JXLImageDecoder::ImageIsHighBitDepth() {
  return is_high_bit_depth_;
}

void JXLImageDecoder::OnSetData(scoped_refptr<SegmentReader> data) {
  // OnSetData is called when more data becomes available for the same image.
  // We should NOT reset metadata state here - that would destroy animation
  // info. The Decode() method handles feeding data to the decoder and rewinding
  // for animation loops when needed.
  //
  // Note: Unlike OnSetData implementations that reset state, we preserve:
  // - basic_info_ (image dimensions, animation settings)
  // - frame_info_ (frame durations, headers)
  // - have_metadata_ (whether we've parsed the header)
  // - all_frames_discovered_ (whether we know all frames)
  //
  // The decoder will be fed fresh data in Decode() which handles this properly.
}

bool JXLImageDecoder::MatchesJXLSignature(
    const FastSharedBufferReader& fast_reader) {
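  // A JXL stream begins either with the two-byte bare codestream signature
  // (0xFF 0x0A) or with the 12-byte ISOBMFF container signature, so reading
  // 12 bytes is enough to recognize both forms.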
  uint8_t buffer[12];
  if (fast_reader.size() < sizeof(buffer)) {
    return false;
  }
  auto data = fast_reader.GetConsecutiveData(0, sizeof(buffer), buffer);
  return jxl_rs_signature_check(
      rust::Slice<const uint8_t>(data.data(), data.size()));
}

void JXLImageDecoder::DecodeSize() {
  Decode(0, /*only_size=*/true);
}

wtf_size_t JXLImageDecoder::DecodeFrameCount() {
  // IMPORTANT: Must parse metadata FIRST to know if this is an animation!
  // Otherwise basic_info_.have_animation will be false (default) and we'll
  // incorrectly return 1 frame.
  if (!have_metadata_) {
    Decode(0, /*only_size=*/true);
  }

  if (!basic_info_.have_animation) {
    return 1;
  }

  // Return the number of frames we've discovered so far.
  // Per the DecodeFrameCount API, it's valid to return an increasing count
  // as frames are received and parsed (like PNG decoder does).
  wtf_size_t count = frame_info_.size();
  if (count == 0) {
    count = 1;
  }

  // Ensure frame buffer cache is large enough.
  if (frame_buffer_cache_.size() < count) {
    frame_buffer_cache_.resize(count);
  }

  DVLOG(1) << "JXL DecodeFrameCount: " << count
           << " all_discovered=" << all_frames_discovered_
           << " have_animation=" << basic_info_.have_animation;
  return count;
}

void JXLImageDecoder::InitializeNewFrame(wtf_size_t index) {
  DCHECK_LT(index, frame_buffer_cache_.size());
  auto& buffer = frame_buffer_cache_[index];

  if (is_high_bit_depth_ &&
      high_bit_depth_decoding_option_ == kHighBitDepthToHalfFloat) {
    buffer.SetPixelFormat(ImageFrame::PixelFormat::kRGBA_F16);
  }

  buffer.SetHasAlpha(basic_info_.has_alpha);
  buffer.SetPremultiplyAlpha(premultiply_alpha_);
  buffer.SetOriginalFrameRect(gfx::Rect(Size()));
  buffer.SetRequiredPreviousFrameIndex(kNotFound);

  if (index < frame_info_.size()) {
    buffer.SetDuration(frame_info_[index].duration);

    // Calculate timestamp as sum of all previous frame durations.
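    // For example, with frame durations {100 ms, 40 ms, 40 ms}, frame 2 gets
    // timestamp 100 ms + 40 ms = 140 ms.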
    base::TimeDelta timestamp;
    for (wtf_size_t i = 0; i < index; ++i) {
      timestamp += frame_info_[i].duration;
    }
    buffer.SetTimestamp(timestamp);
  }
}

void JXLImageDecoder::Decode(wtf_size_t index) {
  Decode(index, false);
}

void JXLImageDecoder::Decode(wtf_size_t index, bool only_size) {
  if (Failed()) {
    return;
  }

  // Check file size limit.
  if (data_ && data_->size() > kMaxJxlFileSize) {
    SetFailed();
    return;
  }

  if (only_size && IsDecodedSizeAvailable() && have_metadata_) {
    return;
  }

  // Early return if the requested frame is already fully decoded and cached.
  // This avoids unnecessary re-decoding during animation loops.
  if (!only_size && index < frame_buffer_cache_.size()) {
    auto status = frame_buffer_cache_[index].GetStatus();
    if (status == ImageFrame::kFrameComplete) {
      return;  // Frame is already cached.
    }
  }

  // For animations, decode ALL frames when first requested.
  // Unlike WebP/GIF which can seek to individual frames via their demuxer APIs,
  // JXL must decode sequentially. Without eager decoding, requesting frame N
  // while at frame M (M < N) would block while decoding M+1 through N,
  // causing animation timing jitter.
  if (!only_size && basic_info_.have_animation && IsAllDataReceived() &&
      all_frames_discovered_ && !all_frames_decoded_) {
    DecodeAllFrames();
    // After decoding all frames, the requested frame should be cached.
    if (index < frame_buffer_cache_.size() &&
        frame_buffer_cache_[index].GetStatus() == ImageFrame::kFrameComplete) {
      return;
    }
  }

  // Get input data from Blink's buffer (no copying needed).
  FastSharedBufferReader reader(data_.get());
  size_t data_size = reader.size();

  // Determine if we need to rewind the decoder.
  bool need_rewind = false;

  if (decoder_.has_value()) {
    // Rewind when transitioning from metadata scan to actual decode.
    // During metadata scan we process frames to discover count/durations,
    // so we need to rewind to decode actual pixel data from the beginning.
    if (!only_size && all_frames_discovered_ && num_decoded_frames_ == 0) {
      need_rewind = true;
    }

    // Rewind for animation loop: requesting a frame before what we've decoded,
    // but only if the frame isn't already cached to avoid re-decoding.
    if (!only_size && basic_info_.have_animation) {
      bool frame_already_cached =
          index < frame_buffer_cache_.size() &&
          frame_buffer_cache_[index].GetStatus() == ImageFrame::kFrameComplete;

      if (!frame_already_cached) {
        // Only rewind if we're truly going backwards (like looping to frame 0).
        // Don't rewind if we're just continuing forward or filling gaps.
        bool is_sequential_or_forward = index >= num_decoded_frames_;

        if (!is_sequential_or_forward) {
          // We're requesting a frame before what we've decoded.
          // This is a loop/rewind situation.
          need_rewind = true;
        }
      }
    }
  }

  if (need_rewind) {
    // Use rewind() for animations (preserves pixel format), reset() otherwise.
    if (basic_info_.have_animation) {
      (*decoder_)->rewind();
    } else {
      (*decoder_)->reset();
    }
    num_decoded_frames_ = 0;
    num_frame_events_in_scan_ = 0;
    input_offset_ = 0;  // Reset input position for rewind.
    // Note: We preserve all_frames_discovered_ - once we know the frame count,
    // we don't need to re-scan. Only reset it if we're doing a fresh metadata
    // scan (only_size=true), not when rewinding for pixel decode.
    if (only_size) {
      all_frames_discovered_ = false;
    }
    // Note: We don't clear frame pixel data here because:
    // 1. For animations, ClearCacheExceptFrame() prevents clearing, so frames
    //    remain cached and we'll return early at the top of Decode().
    // 2. For non-animated images, there's only one frame.
    // 3. If a frame was externally cleared, its status is already kFrameEmpty.
  }

  // Create decoder if needed.
  if (!decoder_.has_value()) {
    decoder_ = jxl_rs_decoder_create();
    num_decoded_frames_ = 0;
    input_offset_ = 0;
  }

  // Process until we get what we need.
  // Data is passed directly to the decoder without buffering.
  for (;;) {
    // Get remaining input data from current offset.
    size_t remaining_size = data_size - input_offset_;
    if (remaining_size == 0 && !IsAllDataReceived()) {
      // No more data available yet, wait for more.
      return;
    }

    // Use a local buffer for GetConsecutiveData - this is just for the
    // SegmentReader interface, the actual data comes from Blink's data_.
    // We read in chunks to avoid allocating huge buffers for large files.
    constexpr size_t kMaxChunkSize = 1024 * 1024;  // 1MB chunks
    size_t chunk_size = std::min(remaining_size, kMaxChunkSize);
    Vector<uint8_t> chunk_buffer(chunk_size);
    auto data_span = reader.GetConsecutiveData(input_offset_, chunk_size,
                                               base::span(chunk_buffer));

    JxlRsProcessResult result = (*decoder_)->process(
        rust::Slice<const uint8_t>(data_span.data(), data_span.size()),
        IsAllDataReceived() && (input_offset_ + chunk_size >= data_size));

    JxlRsStatus status = result.status;

    switch (status) {
      case JxlRsStatus::Error:
        SetFailed();
        return;

      case JxlRsStatus::NeedMoreInput:
        // Don't advance input_offset_ - the decoder needs to see the same
        // bytes again on the next call with more data appended.
        if (IsAllDataReceived()) {
          SetFailed();
        }
        return;

      case JxlRsStatus::BasicInfo: {
        basic_info_ = (*decoder_)->get_basic_info();

        if (!SetSize(basic_info_.width, basic_info_.height)) {
          return;
        }

        // Check for HDR.
        if (basic_info_.bits_per_sample > 8) {
          is_high_bit_depth_ = true;
        }

        // Enable F16 decoding for high bit depth images.
        decode_to_half_float_ =
            ImageIsHighBitDepth() &&
            high_bit_depth_decoding_option_ == kHighBitDepthToHalfFloat;

        // Configure decoder for F16 output when high bit depth.
        if (decode_to_half_float_) {
          (*decoder_)->set_pixel_format(JxlRsPixelFormat::RgbaF16);
        }

        // Extract and set ICC color profile for wide gamut support.
        // Skip if color management is disabled (ColorBehavior::kIgnore).
        if (!IgnoresColorSpace()) {
          auto icc_data = (*decoder_)->get_icc_profile();
          if (!icc_data.empty()) {
            // Copy ICC data to a Vector for safe span access.
            Vector<uint8_t> icc_copy;
            icc_copy.AppendRange(icc_data.begin(), icc_data.end());
            auto profile = ColorProfile::Create(base::span(icc_copy));
            if (profile) {
              SetEmbeddedColorProfile(std::move(profile));
            }
          }
        }

        have_metadata_ = true;

        // For animations, reserve space for first frame info.
        // The actual frame info will be filled in when we get the Frame event.
        if (basic_info_.have_animation && frame_info_.empty()) {
          frame_info_.resize(1);
        }

        // In only_size mode, we must continue processing to discover all
        // frames, so we don't return here, just break.
        break;
      }

      case JxlRsStatus::Frame: {
        JxlRsFrameHeader header = (*decoder_)->get_frame_header();

        if (basic_info_.have_animation) {
          // Frame duration is already in milliseconds from jxl-rs.
          FrameInfo info;
          info.header = header;
          info.duration = base::Milliseconds(header.duration);
          info.received = false;

          // Determine frame index based on mode.
          wtf_size_t frame_idx =
              only_size ? num_frame_events_in_scan_ : num_decoded_frames_;

          if (frame_idx < frame_info_.size()) {
            // Update existing entry (might be from a previous scan).
            frame_info_[frame_idx] = info;
          } else {
            // Add new frame info.
            frame_info_.push_back(info);
            DVLOG(1) << "JXL discovered frame " << frame_idx
                     << " (total: " << frame_info_.size() << ")"
                     << " only_size=" << only_size;
          }
        }
        break;
      }

      case JxlRsStatus::FullImage: {
        if (only_size) {
          // In metadata scan mode, we don't decode pixels, just update the
          // frame count and continue scanning for more frames.
          num_frame_events_in_scan_++;
          if (!(*decoder_)->has_more_frames()) {
            input_offset_ += result.bytes_consumed;
            all_frames_discovered_ = true;
            return;  // End of metadata scan.
          }
          // Note: Don't advance input_offset_ here - it will be advanced
          // after the switch when we continue scanning.
          break;  // Continue scanning.
        }

        // Full decode logic.
        wtf_size_t frame_index = num_decoded_frames_;

        // Ensure frame buffer cache is large enough.
        if (frame_buffer_cache_.size() <= frame_index) {
          frame_buffer_cache_.resize(frame_index + 1);
        }

        if (!InitFrameBuffer(frame_index)) {
          SetFailed();
          return;
        }

        ImageFrame& frame = frame_buffer_cache_[frame_index];
        frame.SetHasAlpha(basic_info_.has_alpha);

        base::CheckedNumeric<size_t> checked_pixel_count =
            base::CheckMul(basic_info_.width, basic_info_.height);
        if (!checked_pixel_count.IsValid()) {
          SetFailed();
          return;
        }
        const size_t pixel_count = checked_pixel_count.ValueOrDie();
        bool premultiply = frame.PremultiplyAlpha() && frame.HasAlpha();

        if (decode_to_half_float_) {
          // Native F16 path for wide gamut/HDR.
          // jxl-rs outputs F16 directly, 4 channels * 2 bytes = 8 bytes/pixel.
          base::CheckedNumeric<size_t> checked_size =
              base::CheckMul(pixel_count, 4, sizeof(uint16_t));
          if (!checked_size.IsValid()) {
            SetFailed();
            return;
          }
          size_t f16_pixel_size = checked_size.ValueOrDie();
          if (pixel_buffer_.size() < f16_pixel_size) {
            pixel_buffer_.resize(f16_pixel_size);
          }

          // Get F16 pixels directly from decoder.
          auto pixel_span =
              rust::Slice<uint8_t>(pixel_buffer_.data(), f16_pixel_size);
          if ((*decoder_)->get_pixels(pixel_span) != JxlRsStatus::Success) {
            SetFailed();
            return;
          }

          // Copy F16 pixels to frame buffer.
          // Use row-based iteration to avoid per-pixel division.
          base::span<const uint8_t> buffer_bytes(pixel_buffer_);
          const uint32_t width = basic_info_.width;
          const uint32_t height = basic_info_.height;

          for (uint32_t y = 0; y < height; ++y) {
            for (uint32_t x = 0; x < width; ++x) {
              // Calculate byte offset for this pixel (row-major layout).
              // Each pixel is 4 F16 values = 8 bytes.
              size_t byte_offset = (y * width + x) * 8;
              auto pixel_bytes = buffer_bytes.subspan(byte_offset, 8u);

              // Read F16 values (jxl-rs outputs in native endianness).
              uint16_t r = base::U16FromNativeEndian(pixel_bytes.subspan<0, 2>());
              uint16_t g = base::U16FromNativeEndian(pixel_bytes.subspan<2, 2>());
              uint16_t b = base::U16FromNativeEndian(pixel_bytes.subspan<4, 2>());
              uint16_t a = base::U16FromNativeEndian(pixel_bytes.subspan<6, 2>());

              // TODO(nicholassig): Premultiply in F16 if needed.
              // For now, premultiplication is not applied to F16 output.
              // This matches the behavior of other HDR decoders.
              (void)premultiply;

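              // Pack the four half-floats into the 64-bit RGBA_F16 pixel,
              // with R in the low 16 bits and A in the high 16 bits.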
              uint64_t* dst = frame.GetAddrF16(x, y);
              *dst = (static_cast<uint64_t>(a) << 48) |
                     (static_cast<uint64_t>(b) << 32) |
                     (static_cast<uint64_t>(g) << 16) |
                     static_cast<uint64_t>(r);
            }
          }
        } else {
          // U8 path for standard 8-bit images.
          base::CheckedNumeric<size_t> checked_size =
              base::CheckMul(pixel_count, 4);
          if (!checked_size.IsValid()) {
            SetFailed();
            return;
          }
          size_t pixel_size = checked_size.ValueOrDie();
          if (pixel_buffer_.size() < pixel_size) {
            pixel_buffer_.resize(pixel_size);
          }

          // Get U8 pixels from decoder.
          auto pixel_span =
              rust::Slice<uint8_t>(pixel_buffer_.data(), pixel_size);
          if ((*decoder_)->get_pixels(pixel_span) != JxlRsStatus::Success) {
            SetFailed();
            return;
          }

          // Use row-based iteration to avoid per-pixel division.
          base::span<const uint8_t> src_bytes(pixel_buffer_);
          const uint32_t width = basic_info_.width;
          const uint32_t height = basic_info_.height;
          const size_t row_stride = width * 4;

          if (premultiply) {
            for (uint32_t y = 0; y < height; ++y) {
              auto row = src_bytes.subspan(y * row_stride, row_stride);

              for (uint32_t x = 0; x < width; ++x) {
                auto pixel = row.subspan(x * 4, 4u);
                uint8_t r = pixel[0];
                uint8_t g = pixel[1];
                uint8_t b = pixel[2];
                uint8_t a = pixel[3];

                // Fast premultiplication:
                // (x * a + 127) / 255 ≈ (x * a + 128) >> 8.
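                // For example, r = 200, a = 128:
                // (200 * 128 + 128) >> 8 = 100, which matches
                // round(200 * 128 / 255) = 100.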
                r = (r * a + 128) >> 8;
                g = (g * a + 128) >> 8;
                b = (b * a + 128) >> 8;

                ImageFrame::PixelData* dst = frame.GetAddr(x, y);
                *dst = (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
                       (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
              }
            }
          } else {
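            // No premultiplication needed: repack the RGBA bytes into the
            // platform's N32 pixel layout using Skia's channel shifts.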
            for (uint32_t y = 0; y < height; ++y) {
              auto row = src_bytes.subspan(y * row_stride, row_stride);

              for (uint32_t x = 0; x < width; ++x) {
                auto pixel = row.subspan(x * 4, 4u);
                ImageFrame::PixelData* dst = frame.GetAddr(x, y);
                *dst = (pixel[3] << SK_A32_SHIFT) | (pixel[0] << SK_R32_SHIFT) |
                       (pixel[1] << SK_G32_SHIFT) | (pixel[2] << SK_B32_SHIFT);
              }
            }
          }
        }

        frame.SetPixelsChanged(true);
        frame.SetStatus(ImageFrame::kFrameComplete);

        if (frame_index < frame_info_.size()) {
          frame_info_[frame_index].received = true;
        }

        num_decoded_frames_++;

        // Check if we've decoded the requested frame.
        if (frame_index >= index) {
          input_offset_ += result.bytes_consumed;
          return;
        }

        // Check for more frames.
        if (!(*decoder_)->has_more_frames()) {
          all_frames_discovered_ = true;
        }
        break;
      }

      case JxlRsStatus::Success:
        input_offset_ += result.bytes_consumed;
        all_frames_discovered_ = true;
        return;

      default:
        SetFailed();
        return;
    }

    // Advance input offset after successful processing.
    // (NeedMoreInput returns early above without advancing.)
    input_offset_ += result.bytes_consumed;
  }
}

bool JXLImageDecoder::CanReusePreviousFrameBuffer(
    wtf_size_t frame_index) const {
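  // Every frame is decoded independently into a full-size buffer
  // (RequiredPreviousFrameIndex is kNotFound), so reusing the previous
  // frame's buffer is always safe.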
  DCHECK_LT(frame_index, frame_buffer_cache_.size());
  return true;
}

bool JXLImageDecoder::FrameIsReceivedAtIndex(wtf_size_t index) const {
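  // Once the entire file has arrived, any discovered frame can be decoded on
  // demand; before that, only frames that are already complete count as
  // received.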
  return IsAllDataReceived() ||
         (index < frame_buffer_cache_.size() &&
          frame_buffer_cache_[index].GetStatus() == ImageFrame::kFrameComplete);
}

std::optional<base::TimeDelta> JXLImageDecoder::FrameTimestampAtIndex(
    wtf_size_t index) const {
  return index < frame_buffer_cache_.size()
             ? frame_buffer_cache_[index].Timestamp()
             : std::nullopt;
}

base::TimeDelta JXLImageDecoder::FrameDurationAtIndex(wtf_size_t index) const {
  return index < frame_buffer_cache_.size()
             ? frame_buffer_cache_[index].Duration()
             : base::TimeDelta();
}

int JXLImageDecoder::RepetitionCount() const {
  if (!basic_info_.have_animation) {
    return kAnimationNone;
  }

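  // In the JXL animation header, a loop count of 0 means "repeat forever".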
  if (basic_info_.animation_loop_count == 0) {
    return kAnimationLoopInfinite;
  }
  return basic_info_.animation_loop_count;
}

wtf_size_t JXLImageDecoder::ClearCacheExceptFrame(
    wtf_size_t clear_except_frame) {
  // For animated JXL images that have been fully decoded, keep ALL frames.
  // JXL requires sequential decoding - evicting any frame means re-decoding
  // the entire animation from frame 0, which causes jitter on pages with
  // multiple animations. Trade-off: Uses more memory but ensures smooth
  // playback.
  if (basic_info_.have_animation && all_frames_decoded_) {
    return 0;  // Keep all frames cached - don't clear anything.
  }

  // For animations still being decoded, keep current and previous frame
  // to avoid flicker if the compositor briefly references the previous frame.
  if (basic_info_.have_animation && clear_except_frame != kNotFound) {
    const wtf_size_t previous_frame =
        clear_except_frame ? clear_except_frame - 1 : kNotFound;
    return ClearCacheExceptTwoFrames(clear_except_frame, previous_frame);
  }

  return ImageDecoder::ClearCacheExceptFrame(clear_except_frame);
}

SkColorType JXLImageDecoder::GetSkColorType() const {
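  // High bit depth images decoded to half float need an F16 surface;
  // everything else uses the platform's native 32-bit format.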
  if (is_high_bit_depth_ &&
      high_bit_depth_decoding_option_ == kHighBitDepthToHalfFloat) {
    return kRGBA_F16_SkColorType;
  }
  return kN32_SkColorType;
}

void JXLImageDecoder::DecodeAllFrames() {
  if (all_frames_decoded_ || Failed()) {
    return;
  }

  // Mark as decoded first to prevent re-entry.
  all_frames_decoded_ = true;

  wtf_size_t total_frames = frame_info_.size();
  if (total_frames == 0) {
    return;
  }

  // Decode each frame sequentially: 0, 1, 2, ...
  // This is simpler and more reliable than trying to decode the last frame
  // and relying on the decode loop to fill in all previous frames.
  for (wtf_size_t i = 0; i < total_frames && !Failed(); ++i) {
    // Skip if already decoded.
    if (i < frame_buffer_cache_.size() &&
        frame_buffer_cache_[i].GetStatus() == ImageFrame::kFrameComplete) {
      continue;
    }
    Decode(i, /*only_size=*/false);
  }

  // Verify all frames are actually decoded.
  if (!Failed()) {
    for (wtf_size_t i = 0; i < total_frames && i < frame_buffer_cache_.size();
         ++i) {
      if (frame_buffer_cache_[i].GetStatus() != ImageFrame::kFrameComplete) {
        all_frames_decoded_ = false;
        break;
      }
    }
  }
}

}  // namespace blink