From ee36cc5d96f3539200fe12bcc919467997a96cf0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 9 Dec 2025 13:23:28 +0100 Subject: [PATCH 1/7] reader tests, new unpad reader --- storage/sealer/fr32/readers.go | 212 +++++++---- storage/sealer/fr32/readers_test.go | 547 ++++++++++++++++++++++++++++ 2 files changed, 678 insertions(+), 81 deletions(-) diff --git a/storage/sealer/fr32/readers.go b/storage/sealer/fr32/readers.go index b499bc422cf..99c160436f4 100644 --- a/storage/sealer/fr32/readers.go +++ b/storage/sealer/fr32/readers.go @@ -2,143 +2,192 @@ package fr32 import ( "io" - "math/bits" - pool "github.com/libp2p/go-buffer-pool" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" ) +// unpadReader implements an io.Reader that reads fr32-padded data from an +// underlying reader and returns unpadded data. +// +// Design: Similar to bufio.Reader, this uses a simple fill-and-read pattern: +// - padbuf: holds padded data read from the source +// - unpadbuf: holds unpadded data ready for the consumer +// - fill() reads padded data, unpads it, stores in unpadbuf +// - Read() copies from unpadbuf to the caller's buffer type unpadReader struct { src io.Reader - left uint64 - work []byte + // padbuf holds padded data read from src + padbuf []byte + // unpadbuf holds unpadded data ready for consumer + unpadbuf []byte - stash []byte + // r, w are read and write positions in unpadbuf + // Data in unpadbuf[r:w] is available for reading + r, w int + + // err stores any error from the underlying reader + err error + + // left is how many padded bytes remain to read from src + left uint64 } +// BufSize returns an appropriate buffer size for the given padded piece size. +// The returned size is suitable for use with NewUnpadReaderBuf. func BufSize(sz abi.PaddedPieceSize) int { return int(MTTresh * mtChunkCount(sz)) } +// NewUnpadReader creates a new unpadding reader with an automatically sized buffer. 
func NewUnpadReader(src io.Reader, sz abi.PaddedPieceSize) (io.Reader, error) { buf := make([]byte, BufSize(sz)) - return NewUnpadReaderBuf(src, sz, buf) } +// NewUnpadReaderBuf creates a new unpadding reader using the provided buffer. +// The buffer must be a valid padded piece size (power of 2) and at least 256 bytes. +// The buffer is split internally: half for reading padded data, half for unpadded output. func NewUnpadReaderBuf(src io.Reader, sz abi.PaddedPieceSize, buf []byte) (io.Reader, error) { if err := sz.Validate(); err != nil { return nil, xerrors.Errorf("bad piece size: %w", err) } if abi.PaddedPieceSize(len(buf)).Validate() != nil { - return nil, xerrors.Errorf("bad buffer size") + return nil, xerrors.Errorf("bad buffer size: must be a valid padded piece size") } - return &unpadReader{ - src: src, + // We split the buffer in half: padbuf for reading padded data, unpadbuf for output. + // padbuf needs to be at least 128 bytes (1 chunk), so buf must be at least 256. + if len(buf) < 256 { + return nil, xerrors.Errorf("buffer too small: must be at least 256 bytes") + } + + // Split buffer in half. + // Since buf is a power of 2 >= 256, half is a power of 2 >= 128. + halfSize := len(buf) / 2 - left: uint64(sz), - work: buf, + return &unpadReader{ + src: src, + padbuf: buf[:halfSize], + unpadbuf: buf[halfSize:], + left: uint64(sz), }, nil } -func (r *unpadReader) Read(out []byte) (int, error) { - idealReadSize := abi.PaddedPieceSize(len(r.work)).Unpadded() - - var err error - var rn int - if len(r.stash) == 0 && len(out) < int(idealReadSize) { - r.stash = pool.Get(int(idealReadSize)) +// fill reads padded data from src, unpads it, and stores in unpadbuf. 
+func (r *unpadReader) fill() { + // Slide existing data to beginning of buffer + if r.r > 0 { + copy(r.unpadbuf, r.unpadbuf[r.r:r.w]) + r.w -= r.r + r.r = 0 + } - rn, err = r.readInner(r.stash) - r.stash = r.stash[:rn] + // Check if we already have an error or no more data to read + if r.err != nil { + return + } + if r.left == 0 { + r.err = io.EOF + return } - if len(r.stash) > 0 { - n := copy(out, r.stash) - r.stash = r.stash[n:] + // Calculate how much padded data to read. + // We need to ensure the unpadded result fits in remaining unpadbuf space. + availSpace := len(r.unpadbuf) - r.w - if len(r.stash) == 0 { - pool.Put(r.stash) - r.stash = nil - } + // Each 128 padded bytes produces 127 unpadded bytes + maxChunks := availSpace / 127 + if maxChunks == 0 { + return // Buffer too full to process even one chunk + } - if err == io.EOF && rn > n { - err = nil - } + // Clamp to what padbuf can hold + padBufChunks := len(r.padbuf) / 128 + if maxChunks > padBufChunks { + maxChunks = padBufChunks + } - return n, err + // Clamp to what's left to read + toReadPadded := maxChunks * 128 + if uint64(toReadPadded) > r.left { + toReadPadded = int(r.left) + // Round down to complete chunks + toReadPadded = (toReadPadded / 128) * 128 } - return r.readInner(out) -} + if toReadPadded == 0 { + // Less than one full chunk remaining + r.err = io.EOF + return + } -// readInner reads from the underlying reader into the provided buffer. -// It requires that out[] is padded(power-of-two).unpadded()-sized, ideally quite large. 
-func (r *unpadReader) readInner(out []byte) (int, error) { - if r.left == 0 { - return 0, io.EOF + // Read padded data from source + n, err := io.ReadFull(r.src, r.padbuf[:toReadPadded]) + if err == io.EOF || err == io.ErrUnexpectedEOF { + // Partial or no data - process complete chunks only + completeChunks := n / 128 + if completeChunks == 0 { + r.err = io.EOF + return + } + validPadded := completeChunks * 128 + r.left -= uint64(validPadded) + + // Unpad the complete chunks + unpadSize := completeChunks * 127 + Unpad(r.padbuf[:validPadded], r.unpadbuf[r.w:r.w+unpadSize]) + r.w += unpadSize + r.err = io.EOF + return + } + if err != nil { + r.err = err + return } - chunks := len(out) / 127 + // Successfully read toReadPadded bytes + r.left -= uint64(n) - outTwoPow := 1 << (63 - bits.LeadingZeros64(uint64(chunks*128))) + // Unpad the data + chunks := n / 128 + unpadSize := chunks * 127 + Unpad(r.padbuf[:n], r.unpadbuf[r.w:r.w+unpadSize]) + r.w += unpadSize - if err := abi.PaddedPieceSize(outTwoPow).Validate(); err != nil { - return 0, xerrors.Errorf("output must be of valid padded piece size: %w", err) + // If we've read everything, mark EOF for next fill + if r.left == 0 { + r.err = io.EOF } +} - // Clamp `todo` to the length of the work buffer to prevent buffer overflows - todo := min(abi.PaddedPieceSize(outTwoPow), abi.PaddedPieceSize(len(r.work))) - if r.left < uint64(todo) { - todo = abi.PaddedPieceSize(1 << (63 - bits.LeadingZeros64(r.left))) +func (r *unpadReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil } - r.left -= uint64(todo) - - n, err := io.ReadAtLeast(r.src, r.work[:todo], int(todo)) - if err == io.ErrUnexpectedEOF { - // We got a partial read. This happens when the underlying reader - // doesn't have as much data as expected (e.g., non-power-of-2 pieces). - // Process what we got. 
- if n > 0 { - // Round down to complete 128-byte chunks - completeChunks := n / 128 - if completeChunks > 0 { - validBytes := completeChunks * 128 - Unpad(r.work[:validBytes], out[:completeChunks*127]) - // Adjust left to reflect that we couldn't read everything - r.left = 0 - return completeChunks * 127, io.EOF - } + // If buffer is empty, fill it + if r.r == r.w { + if r.err != nil { + return 0, r.err } - // Not enough data for even one chunk - return 0, io.EOF - } - if err == io.EOF { - // Clean EOF with no data - if n == 0 { - return 0, io.EOF + r.fill() + // After fill, check again + if r.r == r.w { + return 0, r.err } - // Got some data with EOF - shouldn't happen with ReadAtLeast but handle it - return 0, xerrors.Errorf("unexpected EOF with partial read: %d bytes", n) } - if err != nil { - return 0, err - } - if n < int(todo) { - return 0, xerrors.Errorf("short read without EOF: got %d, expected %d", n, todo) - } - - Unpad(r.work[:todo], out[:todo.Unpadded()]) - return int(todo.Unpadded()), err + // Copy from buffer to p + n = copy(p, r.unpadbuf[r.r:r.w]) + r.r += n + return n, nil } +// padWriter implements an io.WriteCloser that pads data with fr32 padding. type padWriter struct { dst io.Writer @@ -146,6 +195,7 @@ type padWriter struct { work []byte } +// NewPadWriter creates a new padding writer. 
func NewPadWriter(dst io.Writer) io.WriteCloser { return &padWriter{ dst: dst, diff --git a/storage/sealer/fr32/readers_test.go b/storage/sealer/fr32/readers_test.go index f6d23782080..8713132bb94 100644 --- a/storage/sealer/fr32/readers_test.go +++ b/storage/sealer/fr32/readers_test.go @@ -243,3 +243,550 @@ func TestUnpadReaderPartialPiece(t *testing.T) { require.Equal(t, int(actualUnpadded), len(result), "Should read all available data") require.Equal(t, unpadded, result, "Data should match original") } + +// TestUnpadReaderSizeMismatch_PieceProviderPattern reproduces the bug where +// piece_provider.go creates an unpadReader with pieceSize.Padded() but the +// underlying reader only has a portion of the data. This simulates reading +// a range from a piece. +// +// The bug: When NewUnpadReaderBuf is given a size much larger than the +// underlying reader actually has, data corruption occurs at the boundary +// where the underlying reader exhausts. +func TestUnpadReaderSizeMismatch_PieceProviderPattern(t *testing.T) { + // Simulate a large piece (e.g., 32MiB) - must be power of 2 + fullPiecePadded := abi.PaddedPieceSize(32 << 20) // 32MiB padded (power of 2) + fullPieceUnpadded := fullPiecePadded.Unpadded() + + // But we only want to read a portion (e.g., 8MiB range) - must be power of 2 + rangePadded := abi.PaddedPieceSize(8 << 20) // 8MiB padded (power of 2) + rangeUnpadded := rangePadded.Unpadded() + + // Generate the full piece data + fullData := make([]byte, fullPieceUnpadded) + n, err := rand.Read(fullData) + require.NoError(t, err) + require.Equal(t, int(fullPieceUnpadded), n) + + // Pad only the range we'll provide (simulating what readerGetter returns) + rangeData := fullData[:rangeUnpadded] + paddedRange := make([]byte, rangePadded) + fr32.Pad(rangeData, paddedRange) + + // Create underlying reader with ONLY the range data + underlyingReader := bytes.NewReader(paddedRange) + + // BUG REPRODUCTION: Create UnpadReader with FULL piece size + // (this is 
what piece_provider.go does incorrectly) + buf := make([]byte, fr32.BufSize(fullPiecePadded)) + unpadReader, err := fr32.NewUnpadReaderBuf(underlyingReader, fullPiecePadded, buf) + require.NoError(t, err) + + // Try to read all available data + result := make([]byte, 0, rangeUnpadded) + readBuf := make([]byte, 64*1024) // 64KB read buffer + + for { + n, err := unpadReader.Read(readBuf) + if n > 0 { + result = append(result, readBuf[:n]...) + } + if err == io.EOF { + break + } + if err != nil { + t.Logf("Got error after reading %d bytes: %v", len(result), err) + break + } + } + + // Verify we got the correct amount of data + require.Equal(t, int(rangeUnpadded), len(result), + "Should read exactly the available data, got %d, expected %d", len(result), rangeUnpadded) + + // Verify data integrity - no corruption + require.Equal(t, rangeData, result, "Data should match original without corruption") +} + +// TestUnpadReaderSizeMismatch_8MiBBoundary specifically tests the 8MiB boundary +// issue where zeros appear and subsequent data is bit-shifted. 
+func TestUnpadReaderSizeMismatch_8MiBBoundary(t *testing.T) { + // Simulate reading exactly at the 8MiB boundary + // 8MiB = 0x800000 bytes (power of 2) + fullPiecePadded := abi.PaddedPieceSize(64 << 20) // 64MiB full piece (power of 2) + fullPieceUnpadded := fullPiecePadded.Unpadded() + + // Underlying reader has exactly 8MiB of padded data (power of 2) + eightMiBPadded := abi.PaddedPieceSize(8 << 20) + eightMiBUnpadded := eightMiBPadded.Unpadded() + + // Generate test data with a recognizable pattern + fullData := make([]byte, fullPieceUnpadded) + // Fill with pattern: each 127-byte chunk starts with its chunk number + for i := 0; i < int(fullPieceUnpadded); i++ { + chunkNum := i / 127 + posInChunk := i % 127 + fullData[i] = byte((chunkNum + posInChunk) & 0xFF) + } + + // Pad only the 8MiB range + rangeData := fullData[:eightMiBUnpadded] + paddedRange := make([]byte, eightMiBPadded) + fr32.Pad(rangeData, paddedRange) + + underlyingReader := bytes.NewReader(paddedRange) + + // BUG: Create with full piece size but only 8MiB available + buf := make([]byte, fr32.BufSize(fullPiecePadded)) + unpadReader, err := fr32.NewUnpadReaderBuf(underlyingReader, fullPiecePadded, buf) + require.NoError(t, err) + + // Read all available data + result := make([]byte, 0, eightMiBUnpadded+1024) + readBuf := make([]byte, 128*1024) + + for { + n, err := unpadReader.Read(readBuf) + if n > 0 { + result = append(result, readBuf[:n]...) 
+ } + if err == io.EOF { + break + } + if err != nil { + t.Logf("Error after %d bytes: %v", len(result), err) + break + } + } + + // Check for zeros near the 8MiB boundary (0x7fffe0 to 0x800000 in the original bug) + // These offsets are in unpadded space + boundaryStart := int(eightMiBUnpadded) - 256 // Check last 256 bytes + if len(result) >= boundaryStart+256 { + boundaryData := result[boundaryStart : boundaryStart+256] + zeros := 0 + for _, b := range boundaryData { + if b == 0 { + zeros++ + } + } + // The bug caused zeros to appear where they shouldn't + expectedData := rangeData[boundaryStart : boundaryStart+256] + expectedZeros := 0 + for _, b := range expectedData { + if b == 0 { + expectedZeros++ + } + } + if zeros > expectedZeros+10 { + t.Errorf("Found %d unexpected zeros near 8MiB boundary (expected ~%d)", zeros, expectedZeros) + } + } + + // Verify we got correct amount of data + require.Equal(t, int(eightMiBUnpadded), len(result), + "Should read all available data without data loss") + + // Verify data integrity + require.Equal(t, rangeData, result, "Data should match original") +} + +// TestUnpadReaderSizeMismatch_BitShiftCorruption tests that data doesn't get +// bit-shifted when the underlying reader exhausts before the declared size. 
+func TestUnpadReaderSizeMismatch_BitShiftCorruption(t *testing.T) { + // Create a scenario where partial chunk handling could cause bit shift + // Both sizes must be power of 2 + fullPiecePadded := abi.PaddedPieceSize(16 << 20) // 16MiB (power of 2) + fullPieceUnpadded := fullPiecePadded.Unpadded() + + // Use 8MiB as the actual available data (power of 2) + rangePadded := abi.PaddedPieceSize(8 << 20) // 8MiB (power of 2) + rangeUnpadded := rangePadded.Unpadded() + + // Generate data with a specific bit pattern to detect shifts + fullData := make([]byte, fullPieceUnpadded) + for i := range fullData { + // Pattern: 0xAA (10101010) - easy to see if bits shift + fullData[i] = 0xAA + } + + rangeData := fullData[:rangeUnpadded] + paddedRange := make([]byte, rangePadded) + fr32.Pad(rangeData, paddedRange) + + underlyingReader := bytes.NewReader(paddedRange) + + buf := make([]byte, fr32.BufSize(fullPiecePadded)) + unpadReader, err := fr32.NewUnpadReaderBuf(underlyingReader, fullPiecePadded, buf) + require.NoError(t, err) + + result := make([]byte, 0, rangeUnpadded) + readBuf := make([]byte, 64*1024) + + for { + n, err := unpadReader.Read(readBuf) + if n > 0 { + result = append(result, readBuf[:n]...) 
+ } + if err == io.EOF { + break + } + if err != nil { + break + } + } + + require.Equal(t, int(rangeUnpadded), len(result), "Should read all data") + + // Check for bit shift corruption + // If bits are shifted, 0xAA (10101010) would become something else + // like 0x55 (01010101) for 1-bit shift, or other patterns + shiftedBytes := 0 + for i, b := range result { + if b != 0xAA { + shiftedBytes++ + if shiftedBytes <= 10 { + t.Logf("Byte at offset %d (0x%x): got 0x%02x, expected 0xAA", i, i, b) + } + } + } + + if shiftedBytes > 0 { + t.Errorf("Found %d bytes with potential bit-shift corruption (expected 0)", shiftedBytes) + } +} + +// TestUnpadReaderSizeMismatch_MultipleReads tests the scenario where multiple +// sequential reads cross the boundary where underlying data exhausts. +func TestUnpadReaderSizeMismatch_MultipleReads(t *testing.T) { + fullPiecePadded := abi.PaddedPieceSize(32 << 20) + + // Underlying has 4MiB + actualPadded := abi.PaddedPieceSize(4 << 20) + actualUnpadded := actualPadded.Unpadded() + + // Generate random data + fullData := make([]byte, actualUnpadded) + _, err := rand.Read(fullData) + require.NoError(t, err) + + paddedData := make([]byte, actualPadded) + fr32.Pad(fullData, paddedData) + + underlyingReader := bytes.NewReader(paddedData) + + buf := make([]byte, fr32.BufSize(fullPiecePadded)) + unpadReader, err := fr32.NewUnpadReaderBuf(underlyingReader, fullPiecePadded, buf) + require.NoError(t, err) + + // Read in small chunks to trigger multiple reads across boundary + result := make([]byte, 0, actualUnpadded) + readBuf := make([]byte, 127*100) // ~12KB reads + + readCount := 0 + for { + n, err := unpadReader.Read(readBuf) + if n > 0 { + result = append(result, readBuf[:n]...) 
+ readCount++ + } + if err == io.EOF { + break + } + if err != nil { + t.Logf("Error on read %d after %d bytes: %v", readCount, len(result), err) + break + } + } + + t.Logf("Completed %d reads, got %d bytes", readCount, len(result)) + + require.Equal(t, int(actualUnpadded), len(result), + "Should read all available data (%d bytes), got %d", actualUnpadded, len(result)) + require.Equal(t, fullData, result, "Data integrity check failed") +} + +// TestUnpadReaderSizeMismatch_SmallReadBuffer tests the issue with small read +// buffers that trigger the stash mechanism. +func TestUnpadReaderSizeMismatch_SmallReadBuffer(t *testing.T) { + fullPiecePadded := abi.PaddedPieceSize(16 << 20) + + actualPadded := abi.PaddedPieceSize(2 << 20) // 2MiB actual + actualUnpadded := actualPadded.Unpadded() + + fullData := make([]byte, actualUnpadded) + _, err := rand.Read(fullData) + require.NoError(t, err) + + paddedData := make([]byte, actualPadded) + fr32.Pad(fullData, paddedData) + + underlyingReader := bytes.NewReader(paddedData) + + buf := make([]byte, fr32.BufSize(fullPiecePadded)) + unpadReader, err := fr32.NewUnpadReaderBuf(underlyingReader, fullPiecePadded, buf) + require.NoError(t, err) + + // Use very small reads to heavily exercise the stash mechanism + result := make([]byte, 0, actualUnpadded) + readBuf := make([]byte, 100) // Very small - will trigger stash + + for { + n, err := unpadReader.Read(readBuf) + if n > 0 { + result = append(result, readBuf[:n]...) 
+ } + if err == io.EOF { + break + } + if err != nil { + t.Logf("Error after %d bytes: %v", len(result), err) + break + } + } + + require.Equal(t, int(actualUnpadded), len(result), "Should read all data") + require.Equal(t, fullData, result, "Data should match") +} + +// TestUnpadReaderSizeMismatch_OffsetRead simulates the piece_provider pattern +// more closely: reading from an offset within a piece, where the underlying +// reader only has data for the requested range but the unpadReader is told +// about the full piece size. +func TestUnpadReaderSizeMismatch_OffsetRead(t *testing.T) { + // Full piece is 64MiB + fullPiecePadded := abi.PaddedPieceSize(64 << 20) + fullPieceUnpadded := fullPiecePadded.Unpadded() + + // Generate full piece data + fullData := make([]byte, fullPieceUnpadded) + _, err := rand.Read(fullData) + require.NoError(t, err) + + // Pad the full piece + fullPadded := make([]byte, fullPiecePadded) + fr32.Pad(fullData, fullPadded) + + // We want to read starting at 8MiB offset, for 4MiB + // These must be fr32-chunk-aligned (127 byte boundaries for unpadded) + startOffsetUnpadded := abi.UnpaddedPieceSize(8 << 20) // 8MiB + startOffsetUnpadded = (startOffsetUnpadded / 127) * 127 // Align to 127 + readSizeUnpadded := abi.UnpaddedPieceSize(4 << 20) // 4MiB + readSizeUnpadded = (readSizeUnpadded / 127) * 127 // Align to 127 + endOffsetUnpadded := startOffsetUnpadded + readSizeUnpadded + + startOffsetPadded := startOffsetUnpadded.Padded() + endOffsetPadded := endOffsetUnpadded.Padded() + rangePadded := endOffsetPadded - startOffsetPadded + + // Extract the padded range from the full padded data + paddedRange := fullPadded[startOffsetPadded:endOffsetPadded] + + // Create reader with only the range data + underlyingReader := bytes.NewReader(paddedRange) + + // BUG PATTERN: Create unpadReader with full piece size but only range available + // Note: We use a valid piece size that encompasses our range + declaredSize := abi.PaddedPieceSize(rangePadded) + // 
Round up to valid piece size (power of 2) + for declaredSize&(declaredSize-1) != 0 { + declaredSize = declaredSize & (declaredSize - 1) + } + declaredSize *= 2 + if declaredSize < 128 { + declaredSize = 128 + } + + // Try with declared size = full piece size (the bug pattern) + buf := make([]byte, fr32.BufSize(fullPiecePadded)) + unpadReader, err := fr32.NewUnpadReaderBuf(underlyingReader, fullPiecePadded, buf) + require.NoError(t, err) + + // Read all available data + result := make([]byte, 0, readSizeUnpadded) + readBuf := make([]byte, 64*1024) + + for { + n, err := unpadReader.Read(readBuf) + if n > 0 { + result = append(result, readBuf[:n]...) + } + if err == io.EOF { + break + } + if err != nil { + t.Logf("Error after %d bytes: %v", len(result), err) + break + } + } + + expectedData := fullData[startOffsetUnpadded:endOffsetUnpadded] + + require.Equal(t, len(expectedData), len(result), + "Should read exactly %d bytes, got %d", len(expectedData), len(result)) + require.Equal(t, expectedData, result, "Data should match without corruption") +} + +// TestUnpadReaderSizeMismatch_WorkBufferBoundary tests the specific case where +// the underlying data exhausts exactly at a work buffer boundary. 
+func TestUnpadReaderSizeMismatch_WorkBufferBoundary(t *testing.T) { + // Use MTTresh (512KB) as a boundary point + mtTresh := fr32.MTTresh + + // Full piece is larger than MTTresh + fullPiecePadded := abi.PaddedPieceSize(16 << 20) // 16MiB + + // Actual available data is exactly MTTresh (512KB padded) + actualPadded := abi.PaddedPieceSize(mtTresh) + actualUnpadded := actualPadded.Unpadded() + + // Generate and pad data + originalData := make([]byte, actualUnpadded) + _, err := rand.Read(originalData) + require.NoError(t, err) + + paddedData := make([]byte, actualPadded) + fr32.Pad(originalData, paddedData) + + underlyingReader := bytes.NewReader(paddedData) + + buf := make([]byte, fr32.BufSize(fullPiecePadded)) + unpadReader, err := fr32.NewUnpadReaderBuf(underlyingReader, fullPiecePadded, buf) + require.NoError(t, err) + + // Read in chunks that don't align with MTTresh + result := make([]byte, 0, actualUnpadded) + readBuf := make([]byte, 100*1024) // 100KB reads + + for { + n, err := unpadReader.Read(readBuf) + if n > 0 { + result = append(result, readBuf[:n]...) + } + if err == io.EOF { + break + } + if err != nil { + t.Logf("Error after %d bytes: %v", len(result), err) + break + } + } + + require.Equal(t, int(actualUnpadded), len(result), + "Should read all %d bytes, got %d", actualUnpadded, len(result)) + require.Equal(t, originalData, result, "Data integrity check failed") +} + +// TestUnpadReaderSizeMismatch_NonPowerOf2ActualSize tests when the actual +// available data is not a nice power of 2. This is closer to real-world +// scenarios where piece ranges might not align perfectly. 
+func TestUnpadReaderSizeMismatch_NonPowerOf2ActualSize(t *testing.T) { + fullPiecePadded := abi.PaddedPieceSize(16 << 20) + + // Actual data is 100 chunks = 12800 bytes padded (not power of 2) + numChunks := 100 + actualPadded := abi.PaddedPieceSize(numChunks * 128) + actualUnpadded := abi.UnpaddedPieceSize(numChunks * 127) + + originalData := make([]byte, actualUnpadded) + _, err := rand.Read(originalData) + require.NoError(t, err) + + paddedData := make([]byte, actualPadded) + fr32.Pad(originalData, paddedData) + + underlyingReader := bytes.NewReader(paddedData) + + buf := make([]byte, fr32.BufSize(fullPiecePadded)) + unpadReader, err := fr32.NewUnpadReaderBuf(underlyingReader, fullPiecePadded, buf) + require.NoError(t, err) + + result := make([]byte, 0, actualUnpadded) + readBuf := make([]byte, 4096) + + for { + n, err := unpadReader.Read(readBuf) + if n > 0 { + result = append(result, readBuf[:n]...) + } + if err == io.EOF { + break + } + if err != nil { + t.Logf("Error after %d bytes: %v", len(result), err) + break + } + } + + require.Equal(t, int(actualUnpadded), len(result), + "Should read all %d bytes, got %d", actualUnpadded, len(result)) + require.Equal(t, originalData, result, "Data should match") +} + +// TestUnpadReaderSizeMismatch_ExactByteLoss tests reading data and verifies +// no bytes are lost at boundaries. This test specifically checks the range +// around 8MiB (0x800000) where the original bug was observed. 
+func TestUnpadReaderSizeMismatch_ExactByteLoss(t *testing.T) { + // Create a piece where we can detect exact byte loss + fullPiecePadded := abi.PaddedPieceSize(32 << 20) + + // Actual data is just over 8MiB to cross the boundary + targetBytes := uint64(8<<20) + 1024 // 8MiB + 1KB + // Round to chunk boundary + numChunks := (targetBytes + 126) / 127 + actualUnpadded := abi.UnpaddedPieceSize(numChunks * 127) + actualPadded := actualUnpadded.Padded() + + // Create sequential data so we can detect any loss or duplication + originalData := make([]byte, actualUnpadded) + for i := range originalData { + originalData[i] = byte(i & 0xFF) + } + + paddedData := make([]byte, actualPadded) + fr32.Pad(originalData, paddedData) + + underlyingReader := bytes.NewReader(paddedData) + + buf := make([]byte, fr32.BufSize(fullPiecePadded)) + unpadReader, err := fr32.NewUnpadReaderBuf(underlyingReader, fullPiecePadded, buf) + require.NoError(t, err) + + result := make([]byte, 0, actualUnpadded) + readBuf := make([]byte, 64*1024) + + for { + n, err := unpadReader.Read(readBuf) + if n > 0 { + result = append(result, readBuf[:n]...) 
+ } + if err == io.EOF { + break + } + if err != nil { + t.Logf("Error after %d bytes: %v", len(result), err) + break + } + } + + // Check length + require.Equal(t, int(actualUnpadded), len(result), + "Byte count mismatch: expected %d, got %d", actualUnpadded, len(result)) + + // Check data integrity byte by byte around 8MiB boundary + boundaryOffset := 8 << 20 + checkStart := boundaryOffset - 256 + checkEnd := min(boundaryOffset+256, len(result)) + + if len(result) >= checkEnd { + for i := checkStart; i < checkEnd; i++ { + expected := byte(i & 0xFF) + if result[i] != expected { + t.Errorf("Byte mismatch at offset %d (0x%x): got 0x%02x, expected 0x%02x", + i, i, result[i], expected) + } + } + } + + // Full comparison + require.Equal(t, originalData, result, "Data integrity check failed") +} From 00e5b7f20e45cea3ab82e5a3496786bb9eabce89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 9 Dec 2025 13:58:14 +0100 Subject: [PATCH 2/7] fix: fr32: multithreaded Unpad chunk boundary alignment --- storage/sealer/fr32/fr32.go | 19 ++++++++++++++++++- storage/sealer/fr32/readers.go | 32 +++++++++++++++----------------- 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/storage/sealer/fr32/fr32.go b/storage/sealer/fr32/fr32.go index 83a20597fa0..62ece1229c5 100644 --- a/storage/sealer/fr32/fr32.go +++ b/storage/sealer/fr32/fr32.go @@ -41,7 +41,14 @@ func mtChunkCount(usz abi.PaddedPieceSize) uint64 { func mt(in, out []byte, padLen int, op func(unpadded, padded []byte)) { threads := mtChunkCount(abi.PaddedPieceSize(padLen)) - threadBytes := abi.PaddedPieceSize(padLen / int(threads)) + + // Ensure threadBytes is aligned to 128-byte chunk boundaries. + // Each fr32 chunk is 128 padded bytes / 127 unpadded bytes. 
+ chunksPerThread := (padLen / int(threads)) / 128 + if chunksPerThread == 0 { + chunksPerThread = 1 + } + threadBytes := abi.PaddedPieceSize(chunksPerThread * 128) var wg sync.WaitGroup wg.Add(int(threads)) @@ -53,6 +60,16 @@ func mt(in, out []byte, padLen int, op func(unpadded, padded []byte)) { start := threadBytes * abi.PaddedPieceSize(thread) end := start + threadBytes + // Last thread takes any remainder + if thread == int(threads)-1 { + end = abi.PaddedPieceSize(padLen) + } + + // Skip if this thread has no work + if start >= abi.PaddedPieceSize(padLen) { + return + } + op(in[start.Unpadded():end.Unpadded()], out[start:end]) }(i) } diff --git a/storage/sealer/fr32/readers.go b/storage/sealer/fr32/readers.go index 99c160436f4..f42278e5e94 100644 --- a/storage/sealer/fr32/readers.go +++ b/storage/sealer/fr32/readers.go @@ -19,7 +19,7 @@ import ( type unpadReader struct { src io.Reader - // padbuf holds padded data read from src + // padbuf holds padded data read from src (MUST be separate from unpadbuf) padbuf []byte // unpadbuf holds unpadded data ready for consumer unpadbuf []byte @@ -48,8 +48,8 @@ func NewUnpadReader(src io.Reader, sz abi.PaddedPieceSize) (io.Reader, error) { } // NewUnpadReaderBuf creates a new unpadding reader using the provided buffer. -// The buffer must be a valid padded piece size (power of 2) and at least 256 bytes. -// The buffer is split internally: half for reading padded data, half for unpadded output. +// The buffer must be a valid padded piece size (power of 2) and at least 128 bytes. +// The buffer is used for reading padded data; an internal buffer is allocated for unpadded output. 
func NewUnpadReaderBuf(src io.Reader, sz abi.PaddedPieceSize, buf []byte) (io.Reader, error) { if err := sz.Validate(); err != nil { return nil, xerrors.Errorf("bad piece size: %w", err) @@ -59,20 +59,18 @@ func NewUnpadReaderBuf(src io.Reader, sz abi.PaddedPieceSize, buf []byte) (io.Re return nil, xerrors.Errorf("bad buffer size: must be a valid padded piece size") } - // We split the buffer in half: padbuf for reading padded data, unpadbuf for output. - // padbuf needs to be at least 128 bytes (1 chunk), so buf must be at least 256. - if len(buf) < 256 { - return nil, xerrors.Errorf("buffer too small: must be at least 256 bytes") + if len(buf) < 128 { + return nil, xerrors.Errorf("buffer too small: must be at least 128 bytes") } - // Split buffer in half. - // Since buf is a power of 2 >= 256, half is a power of 2 >= 128. - halfSize := len(buf) / 2 + // Calculate unpadbuf size: for N padded bytes, we produce N*127/128 unpadded bytes + padBufSize := len(buf) + unpadBufSize := (padBufSize / 128) * 127 return &unpadReader{ src: src, - padbuf: buf[:halfSize], - unpadbuf: buf[halfSize:], + padbuf: buf, + unpadbuf: make([]byte, unpadBufSize), left: uint64(sz), }, nil } @@ -111,7 +109,7 @@ func (r *unpadReader) fill() { maxChunks = padBufChunks } - // Clamp to what's left to read + // Clamp to what's left to read (in terms of declared size) toReadPadded := maxChunks * 128 if uint64(toReadPadded) > r.left { toReadPadded = int(r.left) @@ -120,7 +118,7 @@ func (r *unpadReader) fill() { } if toReadPadded == 0 { - // Less than one full chunk remaining + // Less than one full chunk remaining in declared size r.err = io.EOF return } @@ -137,7 +135,7 @@ func (r *unpadReader) fill() { validPadded := completeChunks * 128 r.left -= uint64(validPadded) - // Unpad the complete chunks + // Unpad the complete chunks into unpadbuf starting at r.w unpadSize := completeChunks * 127 Unpad(r.padbuf[:validPadded], r.unpadbuf[r.w:r.w+unpadSize]) r.w += unpadSize @@ -152,13 +150,13 @@ func (r 
*unpadReader) fill() { // Successfully read toReadPadded bytes r.left -= uint64(n) - // Unpad the data + // Unpad the data into unpadbuf starting at r.w chunks := n / 128 unpadSize := chunks * 127 Unpad(r.padbuf[:n], r.unpadbuf[r.w:r.w+unpadSize]) r.w += unpadSize - // If we've read everything, mark EOF for next fill + // If we've read everything from declared size, mark EOF for next fill if r.left == 0 { r.err = io.EOF } From 46cbe63e6f4cbd0101621d3c9e58fd7e558f3d39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 9 Dec 2025 14:10:29 +0100 Subject: [PATCH 3/7] fr32: avoid allocation in NewUnpadReaderBuf --- storage/sealer/fr32/fr32_test.go | 129 ++++++++++++++++++++++++++++ storage/sealer/fr32/readers.go | 30 ++++--- storage/sealer/fr32/readers_test.go | 3 +- storage/sealer/piece_provider.go | 5 +- 4 files changed, 152 insertions(+), 15 deletions(-) diff --git a/storage/sealer/fr32/fr32_test.go b/storage/sealer/fr32/fr32_test.go index 605aaf54c13..413e0bcc28f 100644 --- a/storage/sealer/fr32/fr32_test.go +++ b/storage/sealer/fr32/fr32_test.go @@ -3,6 +3,7 @@ package fr32_test import ( "bytes" "crypto/rand" + "fmt" "io" "os" "testing" @@ -149,6 +150,134 @@ func TestRoundtrip16MRand(t *testing.T) { require.Equal(t, ffi, buf) } +// TestRoundtripMisalignedSizes tests the multithreaded Pad/Unpad with sizes that +// previously caused data corruption due to thread boundary misalignment. +// The bug occurred when (padLen / threads) was not a multiple of 128 bytes, +// causing partial chunks at thread boundaries to be skipped. +func TestRoundtripMisalignedSizes(t *testing.T) { + // These sizes are chosen to trigger the multithreaded path (> 512KB) + // and create thread boundaries that don't align to 128-byte chunks. + testCases := []struct { + name string + numChunks int + }{ + // 66061 chunks = 8455808 padded bytes + // With 16 threads: 8455808/16 = 528488 bytes per thread + // 528488/128 = 4128.8125 - NOT aligned! This was the original bug case. 
+ {"66061_chunks_8MiB_boundary", 66061}, + + // Various sizes that create misaligned thread boundaries + {"prime_chunks_1009", 1009 * 8}, // ~1MB, prime-ish number of chunks + {"odd_chunks_8193", 8193}, // Just over 8192 (power of 2) + {"odd_chunks_65537", 65537}, // Just over 65536 (power of 2) + {"odd_chunks_100003", 100003}, // Large prime-ish + {"boundary_chunks_66000", 66000}, // Near the original bug size + {"boundary_chunks_70000", 70000}, // Larger odd size + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + unpaddedSize := tc.numChunks * 127 + paddedSize := tc.numChunks * 128 + + // Skip if too large for this test + if paddedSize > 64<<20 { + t.Skip("Size too large for this test") + } + + input := make([]byte, unpaddedSize) + _, err := rand.Read(input) + require.NoError(t, err) + + padded := make([]byte, paddedSize) + fr32.Pad(input, padded) + + output := make([]byte, unpaddedSize) + fr32.Unpad(padded, output) + + require.Equal(t, input, output, "Roundtrip failed for %d chunks", tc.numChunks) + }) + } +} + +// TestUnpadMisalignedThreadBoundaries specifically tests the fix for the +// multithreaded Unpad bug where thread boundaries weren't aligned to +// 128-byte fr32 chunks, causing data loss. 
+func TestUnpadMisalignedThreadBoundaries(t *testing.T) { + // Create data that's just over 8MiB to trigger the original bug + // 66061 chunks * 127 bytes = 8389747 unpadded bytes + // 66061 chunks * 128 bytes = 8455808 padded bytes + numChunks := 66061 + unpaddedSize := numChunks * 127 + paddedSize := numChunks * 128 + + // Create sequential data so we can detect exactly where corruption occurs + input := make([]byte, unpaddedSize) + for i := range input { + input[i] = byte(i & 0xFF) + } + + padded := make([]byte, paddedSize) + fr32.Pad(input, padded) + + output := make([]byte, unpaddedSize) + fr32.Unpad(padded, output) + + // Check for corruption at thread boundaries + // With the original bug, corruption occurred at offsets like: + // 528384 (thread 0/1 boundary), 1056768 (thread 1/2 boundary), etc. + + // First verify total length + require.Equal(t, len(input), len(output), "Output length mismatch") + + // Check every byte + for i := 0; i < len(input); i++ { + if input[i] != output[i] { + // Find the extent of the corruption + corruptStart := i + corruptEnd := i + for corruptEnd < len(input) && input[corruptEnd] != output[corruptEnd] { + corruptEnd++ + } + t.Fatalf("Data corruption at offset %d (0x%x) to %d (0x%x): expected 0x%02x, got 0x%02x (corrupt bytes: %d)", + corruptStart, corruptStart, corruptEnd, corruptEnd, + input[i], output[i], corruptEnd-corruptStart) + } + } +} + +// TestPadUnpadVariousSizesAboveMTTresh tests Pad/Unpad roundtrip for various +// sizes above the MTTresh (512KB) threshold that triggers multithreading. 
+func TestPadUnpadVariousSizesAboveMTTresh(t *testing.T) { + // Test sizes from just above MTTresh to several MB + // These should all use the multithreaded path + sizes := []int{ + 513 * 1024 / 127 * 127, // Just above 512KB, aligned to chunks + 1 * 1024 * 1024 / 127 * 127, // ~1MB aligned + 2*1024*1024/127*127 + 127*100, // ~2MB + extra chunks + 4*1024*1024/127*127 + 127*333, // ~4MB + odd chunks + 8*1024*1024/127*127 + 127*777, // ~8MB + odd chunks + } + + for _, unpaddedSize := range sizes { + paddedSize := unpaddedSize / 127 * 128 + + t.Run(fmt.Sprintf("%d_bytes", unpaddedSize), func(t *testing.T) { + input := make([]byte, unpaddedSize) + _, err := rand.Read(input) + require.NoError(t, err) + + padded := make([]byte, paddedSize) + fr32.Pad(input, padded) + + output := make([]byte, unpaddedSize) + fr32.Unpad(padded, output) + + require.Equal(t, input, output) + }) + } +} + func BenchmarkPadChunk(b *testing.B) { var buf [128]byte in := bytes.Repeat([]byte{0xff}, 127) diff --git a/storage/sealer/fr32/readers.go b/storage/sealer/fr32/readers.go index f42278e5e94..e80c9bc6016 100644 --- a/storage/sealer/fr32/readers.go +++ b/storage/sealer/fr32/readers.go @@ -48,29 +48,33 @@ func NewUnpadReader(src io.Reader, sz abi.PaddedPieceSize) (io.Reader, error) { } // NewUnpadReaderBuf creates a new unpadding reader using the provided buffer. -// The buffer must be a valid padded piece size (power of 2) and at least 128 bytes. -// The buffer is used for reading padded data; an internal buffer is allocated for unpadded output. +// sz is the number of padded bytes to read (must be a multiple of 128). +// buf is split 50/50: first half for padded input, second half for unpadded output. +// buf must be at least 256 bytes and a multiple of 128. 
func NewUnpadReaderBuf(src io.Reader, sz abi.PaddedPieceSize, buf []byte) (io.Reader, error) { - if err := sz.Validate(); err != nil { - return nil, xerrors.Errorf("bad piece size: %w", err) + if sz%128 != 0 { + return nil, xerrors.Errorf("padded size must be a multiple of 128: %d", sz) } - if abi.PaddedPieceSize(len(buf)).Validate() != nil { - return nil, xerrors.Errorf("bad buffer size: must be a valid padded piece size") + if len(buf) < 256 { + return nil, xerrors.Errorf("buffer too small: must be at least 256 bytes, got %d", len(buf)) } - if len(buf) < 128 { - return nil, xerrors.Errorf("buffer too small: must be at least 128 bytes") + if len(buf)%128 != 0 { + return nil, xerrors.Errorf("buffer size must be a multiple of 128: %d", len(buf)) } - // Calculate unpadbuf size: for N padded bytes, we produce N*127/128 unpadded bytes - padBufSize := len(buf) - unpadBufSize := (padBufSize / 128) * 127 + // Split buffer 50/50: first half for padded data, second half for unpadded output. + // Round down to ensure padbuf is a multiple of 128. 
+ halfSize := (len(buf) / 2 / 128) * 128 + if halfSize < 128 { + halfSize = 128 + } return &unpadReader{ src: src, - padbuf: buf, - unpadbuf: make([]byte, unpadBufSize), + padbuf: buf[:halfSize], + unpadbuf: buf[halfSize:], left: uint64(sz), }, nil } diff --git a/storage/sealer/fr32/readers_test.go b/storage/sealer/fr32/readers_test.go index 8713132bb94..e48201afaac 100644 --- a/storage/sealer/fr32/readers_test.go +++ b/storage/sealer/fr32/readers_test.go @@ -45,7 +45,8 @@ func TestUnpadReaderBufWithSmallWorkBuf(t *testing.T) { padOut := make([]byte, ps.Padded()) fr32.Pad(raw, padOut) - buf := make([]byte, abi.PaddedPieceSize(uint64(128))) + // Minimum buffer size is 256 bytes (split 50/50 into 128-byte padbuf and 128-byte unpadbuf) + buf := make([]byte, 256) r, err := fr32.NewUnpadReaderBuf(bytes.NewReader(padOut), ps.Padded(), buf) if err != nil { t.Fatal(err) diff --git a/storage/sealer/piece_provider.go b/storage/sealer/piece_provider.go index 223a630843e..085fc837442 100644 --- a/storage/sealer/piece_provider.go +++ b/storage/sealer/piece_provider.go @@ -112,9 +112,12 @@ func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, pc cid.Cid, se return nil, xerrors.Errorf("getting reader at +%d: %w", startOffsetAligned, err) } + // The actual padded size we're reading from the underlying reader + readPaddedSize := abi.PaddedPieceSize(endOffsetAligned.Padded() - startOffsetAligned.Padded()) + buf := pool.Get(fr32.BufSize(pieceSize.Padded())) - upr, err := fr32.NewUnpadReaderBuf(r, pieceSize.Padded(), buf) + upr, err := fr32.NewUnpadReaderBuf(r, readPaddedSize, buf) if err != nil { r.Close() // nolint return nil, xerrors.Errorf("creating unpadded reader: %w", err) From 1b8d112e097b792926d41bf9b48191649c831a15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 9 Dec 2025 14:15:38 +0100 Subject: [PATCH 4/7] add changelog entry --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
989a79acaf8..d4ba3b207fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ # UNRELEASED - feat(gateway): expose StateGetRandomnessDigestFromBeacon ([filecoin-project/lotus#13339](https://github.com/filecoin-project/lotus/pull/13339)) +- fix(fr32): fix data corruption in multithreaded Pad/Unpad for non-aligned sizes # Node and Miner v1.34.0 / 2025-09-11 From b0d8e06e930916d97b589fe46f67532b61f534f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 9 Dec 2025 14:36:05 +0100 Subject: [PATCH 5/7] pieceReader: use readPaddedSize for buffer sizing Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- storage/sealer/piece_provider.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/sealer/piece_provider.go b/storage/sealer/piece_provider.go index 085fc837442..f8ceffe9224 100644 --- a/storage/sealer/piece_provider.go +++ b/storage/sealer/piece_provider.go @@ -115,7 +115,7 @@ func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, pc cid.Cid, se // The actual padded size we're reading from the underlying reader readPaddedSize := abi.PaddedPieceSize(endOffsetAligned.Padded() - startOffsetAligned.Padded()) - buf := pool.Get(fr32.BufSize(pieceSize.Padded())) + buf := pool.Get(fr32.BufSize(readPaddedSize)) upr, err := fr32.NewUnpadReaderBuf(r, readPaddedSize, buf) if err != nil { From f3a35c0a16148901066751b9e8a69e4ac3457242 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 9 Dec 2025 22:42:10 +0100 Subject: [PATCH 6/7] address important review issue --- storage/sealer/fr32/fr32_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/sealer/fr32/fr32_test.go b/storage/sealer/fr32/fr32_test.go index 413e0bcc28f..7b8e95cc9c3 100644 --- a/storage/sealer/fr32/fr32_test.go +++ b/storage/sealer/fr32/fr32_test.go @@ -170,7 +170,7 @@ func TestRoundtripMisalignedSizes(t *testing.T) { {"prime_chunks_1009", 1009 * 8}, // ~1MB, prime-ish number of 
chunks {"odd_chunks_8193", 8193}, // Just over 8192 (power of 2) {"odd_chunks_65537", 65537}, // Just over 65536 (power of 2) - {"odd_chunks_100003", 100003}, // Large prime-ish + {"odd_chunks_100003", 100003}, // Large prime {"boundary_chunks_66000", 66000}, // Near the original bug size {"boundary_chunks_70000", 70000}, // Larger odd size } From c11ecf68838782e8ba9e9a5cb2c44e962fc157db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 10 Dec 2025 13:46:22 +0100 Subject: [PATCH 7/7] fix lint --- storage/sealer/fr32/readers_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/storage/sealer/fr32/readers_test.go b/storage/sealer/fr32/readers_test.go index e48201afaac..386b41d8768 100644 --- a/storage/sealer/fr32/readers_test.go +++ b/storage/sealer/fr32/readers_test.go @@ -577,7 +577,6 @@ func TestUnpadReaderSizeMismatch_OffsetRead(t *testing.T) { startOffsetPadded := startOffsetUnpadded.Padded() endOffsetPadded := endOffsetUnpadded.Padded() - rangePadded := endOffsetPadded - startOffsetPadded // Extract the padded range from the full padded data paddedRange := fullPadded[startOffsetPadded:endOffsetPadded] @@ -587,7 +586,7 @@ func TestUnpadReaderSizeMismatch_OffsetRead(t *testing.T) { // BUG PATTERN: Create unpadReader with full piece size but only range available // Note: We use a valid piece size that encompasses our range - declaredSize := abi.PaddedPieceSize(rangePadded) + declaredSize := endOffsetPadded - startOffsetPadded // Round up to valid piece size (power of 2) for declaredSize&(declaredSize-1) != 0 { declaredSize = declaredSize & (declaredSize - 1)