Skip to content

Commit

Permalink
create vfs_test.go
Browse files Browse the repository at this point in the history
  • Loading branch information
soypat committed Feb 20, 2024
1 parent a99ecdb commit 6180576
Show file tree
Hide file tree
Showing 2 changed files with 151 additions and 65 deletions.
75 changes: 10 additions & 65 deletions fat_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@ package fat
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"log/slog"
"os"
Expand Down Expand Up @@ -168,7 +167,11 @@ func TestFileInfo(t *testing.T) {
}
}

func DefaultFATByteBlocks(numBlocks int) *BytesBlocks {
func DefaultFATByteBlocks(numBlocks int) BlockDeviceExtended {
// TODO: try BlockMap implementation with tests to see perf improvements?
// newfat := maps.Clone(fatInit)
// blk := &BlockMap{data: newfat}
// return blk
const defaultBlockSize = 512
blk, _ := makeBlockIndexer(defaultBlockSize)
buf := make([]byte, defaultBlockSize*numBlocks)
Expand All @@ -179,7 +182,7 @@ func DefaultFATByteBlocks(numBlocks int) *BytesBlocks {
}
copy(buf[off:], b[:])
}
return &BytesBlocks{
return &BlockByteSlice{
blk: blk,
buf: buf,
}
Expand All @@ -191,65 +194,6 @@ func mustBeOK(t *testing.T, fr fileResult) {
}
}

type BytesBlocks struct {
blk blkIdxer
buf []byte
}

func (b *BytesBlocks) BlockSize() int { return int(b.blk.size()) }

func (b *BytesBlocks) ReadBlocks(dst []byte, startBlock int64) (int, error) {
if b.blk.off(int64(len(dst))) != 0 {
return 0, errors.New("startBlock not aligned to block size")
} else if startBlock < 0 {
return 0, errors.New("invalid startBlock")
}
off := startBlock * b.blk.size()
end := off + int64(len(dst))
if end > int64(len(b.buf)) {
return 0, fmt.Errorf("read past end of buffer: %d > %d", end, len(b.buf))
// return 0, errors.New("read past end of buffer")
}

return copy(dst, b.buf[off:end]), nil
}
func (b *BytesBlocks) WriteBlocks(data []byte, startBlock int64) (int, error) {
if b.blk.off(int64(len(data))) != 0 {
return 0, errors.New("startBlock not aligned to block size")
} else if startBlock < 0 {
return 0, errors.New("invalid startBlock")
}
off := startBlock * b.blk.size()
end := off + int64(len(data))
if end > int64(len(b.buf)) {
return 0, fmt.Errorf("write past end of buffer: %d > %d", end, len(b.buf))
// return 0, errors.New("write past end of buffer")
}

return copy(b.buf[off:end], data), nil
}
func (b *BytesBlocks) EraseBlocks(startBlock, numBlocks int64) error {
if startBlock < 0 || numBlocks <= 0 {
return errors.New("invalid erase parameters")
}
start := startBlock * b.blk.size()
end := start + numBlocks*b.blk.size()
if end > int64(len(b.buf)) {
return errors.New("erase past end of buffer")
}
clear(b.buf[start:end])
return nil
}

func (b *BytesBlocks) Size() int64 {
return int64(len(b.buf))
}

// Mode returns 0 for no connection/prohibited access, 1 for read-only, 3 for read-write.
func (b *BytesBlocks) Mode() uint8 {
return 3
}

func fatInitDiff(data []byte) (s string) {
max := int64(len(data)) / 512
for block := int64(0); block < max; block++ {
Expand All @@ -264,17 +208,18 @@ func fatInitDiff(data []byte) (s string) {
}
return s
}
func initTestFAT() (*FS, *BytesBlocks) {

func initTestFAT() (*FS, BlockDeviceExtended) {
return initTestFATWithLogger(32000, slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
Level: slogLevelTrace,
})))

}
func initTestFATWithLogger(size int64, log *slog.Logger) (*FS, *BytesBlocks) {
func initTestFATWithLogger(size int64, log *slog.Logger) (*FS, BlockDeviceExtended) {
dev := DefaultFATByteBlocks(int(size))
var fs FS
fs.log = log
ss := uint16(dev.blk.size())
ss := uint16(dev.BlockSize())
err := fs.Mount(dev, int(ss), ModeRW)
if err != nil {
panic(err)
Expand Down
141 changes: 141 additions & 0 deletions vfs_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,141 @@
package fat

import (
"errors"
"fmt"
)

// BlockDeviceExtended is a BlockDevice that additionally reports its total
// capacity in bytes and its block size in bytes. Test block devices implement
// it so the test harness can size and mount them without knowing the
// concrete backing store.
type BlockDeviceExtended interface {
	BlockDevice
	// Size returns the total capacity of the device in bytes.
	Size() int64
	// BlockSize returns the size of a single block in bytes.
	BlockSize() int
}

// blkmapsize is the fixed block size in bytes used by BlockMap.
const blkmapsize = 512

// BlockMap is a sparse in-memory block device backed by a map keyed by block
// index. Blocks that were never written read back as all zeros, which makes
// it cheap to emulate a very large, mostly empty disk in tests.
type BlockMap struct {
	data map[int64][blkmapsize]byte
}

// BlockSize returns the fixed block size of the device in bytes.
func (b *BlockMap) BlockSize() int { return blkmapsize }

// Size returns the advertised capacity of the device in bytes.
func (b *BlockMap) Size() int64 {
	const kilobyte = 1000
	const megabyte = 1000 * kilobyte
	const gigabyte = 1000 * megabyte
	return 4 * gigabyte // 4GB does not overflow uint32, so likely safe for use with FAT32?
}

// ReadBlocks reads len(dst)/blkmapsize consecutive blocks starting at
// startBlock into dst. Blocks never written read as zeros. It returns the
// number of bytes copied into dst.
func (b *BlockMap) ReadBlocks(dst []byte, startBlock int64) (int, error) {
	if startBlock < 0 {
		return 0, errors.New("invalid startBlock")
	}
	if len(dst)%blkmapsize != 0 {
		return 0, errors.New("dst size not multiple of block size")
	}
	numBlocks := len(dst) / blkmapsize
	for bidx := 0; bidx < numBlocks; bidx++ {
		// Index relative to startBlock; a missing entry yields the zero block.
		block := b.data[startBlock+int64(bidx)]
		copy(dst[bidx*blkmapsize:], block[:])
	}
	return numBlocks * blkmapsize, nil
}

// WriteBlocks writes len(data)/blkmapsize consecutive blocks starting at
// startBlock. It returns the number of bytes consumed from data.
func (b *BlockMap) WriteBlocks(data []byte, startBlock int64) (int, error) {
	if startBlock < 0 {
		return 0, errors.New("invalid startBlock")
	}
	if len(data)%blkmapsize != 0 {
		return 0, errors.New("data size not multiple of block size")
	}
	if b.data == nil {
		// Lazily allocate so the zero value of BlockMap is usable.
		b.data = make(map[int64][blkmapsize]byte)
	}
	numBlocks := len(data) / blkmapsize
	for bidx := 0; bidx < numBlocks; bidx++ {
		var blk [blkmapsize]byte
		copy(blk[:], data[bidx*blkmapsize:])
		b.data[startBlock+int64(bidx)] = blk
	}
	return numBlocks * blkmapsize, nil
}

// EraseBlocks deletes blocks in [startBlock, startBlock+numBlocks) so that
// they read back as zeros.
func (b *BlockMap) EraseBlocks(startBlock, numBlocks int64) error {
	if startBlock < 0 || numBlocks <= 0 {
		return errors.New("invalid erase parameters")
	}
	end := startBlock + numBlocks
	if end < startBlock {
		return errors.New("overflow")
	}
	if numBlocks < int64(len(b.data)) {
		// Erase range smaller than the map: delete by block index.
		for i := startBlock; i < end; i++ {
			delete(b.data, i)
		}
	} else {
		// Erase range spans most or all entries: scan the map once.
		for blkidx := range b.data {
			if blkidx >= startBlock && blkidx < end {
				delete(b.data, blkidx)
			}
		}
	}
	return nil
}

// BlockByteSlice is a block device backed by a single contiguous byte slice.
type BlockByteSlice struct {
	blk blkIdxer // block size/offset arithmetic helper
	buf []byte   // raw device contents; capacity is len(buf) bytes
}

func (b *BlockByteSlice) BlockSize() int { return int(b.blk.size()) }

// ReadBlocks copies whole blocks starting at startBlock into dst.
// len(dst) must be a multiple of the block size. It returns the number of
// bytes copied into dst.
func (b *BlockByteSlice) ReadBlocks(dst []byte, startBlock int64) (int, error) {
	if b.blk.off(int64(len(dst))) != 0 {
		// This check is on the destination length, not startBlock; the old
		// message blamed startBlock and misled debugging.
		return 0, errors.New("dst length not aligned to block size")
	} else if startBlock < 0 {
		return 0, errors.New("invalid startBlock")
	}
	off := startBlock * b.blk.size()
	end := off + int64(len(dst))
	if end > int64(len(b.buf)) {
		return 0, fmt.Errorf("read past end of buffer: %d > %d", end, len(b.buf))
	}
	return copy(dst, b.buf[off:end]), nil
}
// WriteBlocks copies whole blocks from data into the device starting at
// startBlock. len(data) must be a multiple of the block size. It returns the
// number of bytes written.
func (b *BlockByteSlice) WriteBlocks(data []byte, startBlock int64) (int, error) {
	if b.blk.off(int64(len(data))) != 0 {
		// This check is on the source length, not startBlock; the old
		// message blamed startBlock and misled debugging.
		return 0, errors.New("data length not aligned to block size")
	} else if startBlock < 0 {
		return 0, errors.New("invalid startBlock")
	}
	off := startBlock * b.blk.size()
	end := off + int64(len(data))
	if end > int64(len(b.buf)) {
		return 0, fmt.Errorf("write past end of buffer: %d > %d", end, len(b.buf))
	}
	return copy(b.buf[off:end], data), nil
}
// EraseBlocks zeroes numBlocks consecutive blocks beginning at startBlock.
func (b *BlockByteSlice) EraseBlocks(startBlock, numBlocks int64) error {
	if startBlock < 0 || numBlocks <= 0 {
		return errors.New("invalid erase parameters")
	}
	bs := b.blk.size()
	lo := startBlock * bs
	hi := lo + numBlocks*bs
	if hi > int64(len(b.buf)) {
		return errors.New("erase past end of buffer")
	}
	clear(b.buf[lo:hi])
	return nil
}

// Size returns the total capacity of the device in bytes.
func (b *BlockByteSlice) Size() int64 {
	n := len(b.buf)
	return int64(n)
}

0 comments on commit 6180576

Please sign in to comment.