// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019+ Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package s2

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"runtime"
	"sync"
)

// ErrCantSeek is returned if the stream cannot be seeked.
type ErrCantSeek struct {
	// Reason describes why seeking is not possible.
	Reason string
}

// Error returns the error as string.
func (e ErrCantSeek) Error() string {
	return fmt.Sprintf("s2: Can't seek because %s", e.Reason)
}

// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes.
func ( io.Reader,  ...ReaderOption) *Reader {
	 := Reader{
		r:        ,
		maxBlock: maxBlockSize,
	}
	for ,  := range  {
		if  := (&);  != nil {
			.err = 
			return &
		}
	}
	.maxBufSize = MaxEncodedLen(.maxBlock) + checksumSize
	if .lazyBuf > 0 {
		.buf = make([]byte, MaxEncodedLen(.lazyBuf)+checksumSize)
	} else {
		.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize)
	}
	.readHeader = .ignoreStreamID
	.paramsOK = true
	return &
}

// ReaderOption is an option for creating a decoder.
// Options are applied by NewReader in the order given; a non-nil
// error aborts construction and is stored on the returned Reader.
type ReaderOption func(*Reader) error

// ReaderMaxBlockSize allows to control allocations if the stream
// has been compressed with a smaller WriterBlockSize, or with the default 1MB.
// Blocks must be this size or smaller to decompress,
// otherwise the decoder will return ErrUnsupported.
//
// For streams compressed with Snappy this can safely be set to 64KB (64 << 10).
//
// Default is the maximum limit of 4MB.
func ( int) ReaderOption {
	return func( *Reader) error {
		if  > maxBlockSize ||  <= 0 {
			return errors.New("s2: block size too large. Must be <= 4MB and > 0")
		}
		if .lazyBuf == 0 &&  < defaultBlockSize {
			.lazyBuf = 
		}
		.maxBlock = 
		return nil
	}
}

// ReaderAllocBlock allows to control upfront stream allocations
// and not allocate for frames bigger than this initially.
// If frames bigger than this is seen a bigger buffer will be allocated.
//
// Default is 1MB, which is default output size.
func ( int) ReaderOption {
	return func( *Reader) error {
		if  > maxBlockSize ||  < 1024 {
			return errors.New("s2: invalid ReaderAllocBlock. Must be <= 4MB and >= 1024")
		}
		.lazyBuf = 
		return nil
	}
}

// ReaderIgnoreStreamIdentifier will make the reader skip the expected
// stream identifier at the beginning of the stream.
// This can be used when serving a stream that has been forwarded to a specific point.
func () ReaderOption {
	return func( *Reader) error {
		.ignoreStreamID = true
		return nil
	}
}

// ReaderSkippableCB will register a callback for chuncks with the specified ID.
// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive).
// For each chunk with the ID, the callback is called with the content.
// Any returned non-nil error will abort decompression.
// Only one callback per ID is supported, latest sent will be used.
// You can peek the stream, triggering the callback, by doing a Read with a 0
// byte buffer.
func ( uint8,  func( io.Reader) error) ReaderOption {
	return func( *Reader) error {
		if  < 0x80 ||  > 0xfd {
			return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)")
		}
		.skippableCB[-0x80] = 
		return nil
	}
}

// ReaderIgnoreCRC will make the reader skip CRC calculation and checks.
func () ReaderOption {
	return func( *Reader) error {
		.ignoreCRC = true
		return nil
	}
}

// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
	r           io.Reader // underlying compressed input stream
	err         error     // sticky error; once set, all operations fail with it
	decoded     []byte    // decompressed output of the current block
	buf         []byte    // scratch buffer for reading compressed chunks
	skippableCB [0xff - 0x80]func(r io.Reader) error // per-ID callbacks for skippable chunks 0x80-0xfe
	blockStart  int64 // Uncompressed offset at start of current.
	index       *Index // optional seek index; nil when not loaded

	// decoded[i:j] contains decoded bytes that have not yet been passed on.
	i, j int
	// maximum block size allowed.
	maxBlock int
	// maximum expected buffer size.
	maxBufSize int
	// alloc a buffer this size if > 0.
	lazyBuf        int
	readHeader     bool // stream identifier already consumed (or ignored)
	paramsOK       bool // set by NewReader; guards Reset against zero-value Readers
	snappyFrame    bool // stream identified itself as plain Snappy, not S2
	ignoreStreamID bool // skip checking the leading stream identifier chunk
	ignoreCRC      bool // skip CRC verification of blocks
}

// GetBufferCapacity returns the capacity of the internal buffer.
// This might be useful to know when reusing the same reader in combination
// with the lazy buffer option.
func ( *Reader) () int {
	return cap(.buf)
}

// ensureBufferSize will ensure that the buffer can take at least n bytes.
// If false is returned the buffer exceeds maximum allowed size.
func ( *Reader) ( int) bool {
	if  > .maxBufSize {
		.err = ErrCorrupt
		return false
	}
	if cap(.buf) >=  {
		return true
	}
	// Realloc buffer.
	.buf = make([]byte, )
	return true
}

// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func ( *Reader) ( io.Reader) {
	if !.paramsOK {
		return
	}
	.index = nil
	.r = 
	.err = nil
	.i = 0
	.j = 0
	.blockStart = 0
	.readHeader = .ignoreStreamID
}

func ( *Reader) ( []byte,  bool) ( bool) {
	if _, .err = io.ReadFull(.r, ); .err != nil {
		if .err == io.ErrUnexpectedEOF || (.err == io.EOF && !) {
			.err = ErrCorrupt
		}
		return false
	}
	return true
}

// skippable will skip n bytes.
// If the supplied reader supports seeking that is used.
// tmp is used as a temporary buffer for reading.
// The supplied slice does not need to be the size of the read.
func ( *Reader) ( []byte,  int,  bool,  uint8) ( bool) {
	if  < 0x80 {
		.err = fmt.Errorf("internal error: skippable id < 0x80")
		return false
	}
	if  := .skippableCB[-0x80];  != nil {
		 := io.LimitReader(.r, int64())
		.err = ()
		if .err != nil {
			return false
		}
		_, .err = io.CopyBuffer(ioutil.Discard, , )
		return .err == nil
	}
	if ,  := .r.(io.ReadSeeker);  {
		,  := .Seek(int64(), io.SeekCurrent)
		if  == nil {
			return true
		}
		if  == io.ErrUnexpectedEOF || (.err == io.EOF && !) {
			.err = ErrCorrupt
			return false
		}
	}
	for  > 0 {
		if  < len() {
			 = [:]
		}
		if _, .err = io.ReadFull(.r, ); .err != nil {
			if .err == io.ErrUnexpectedEOF || (.err == io.EOF && !) {
				.err = ErrCorrupt
			}
			return false
		}
		 -= len()
	}
	return true
}

// Read satisfies the io.Reader interface.
func ( *Reader) ( []byte) (int, error) {
	if .err != nil {
		return 0, .err
	}
	for {
		if .i < .j {
			 := copy(, .decoded[.i:.j])
			.i += 
			return , nil
		}
		if !.readFull(.buf[:4], true) {
			return 0, .err
		}
		 := .buf[0]
		if !.readHeader {
			if  != chunkTypeStreamIdentifier {
				.err = ErrCorrupt
				return 0, .err
			}
			.readHeader = true
		}
		 := int(.buf[1]) | int(.buf[2])<<8 | int(.buf[3])<<16

		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch  {
		case chunkTypeCompressedData:
			.blockStart += int64(.j)
			// Section 4.2. Compressed data (chunk type 0x00).
			if  < checksumSize {
				.err = ErrCorrupt
				return 0, .err
			}
			if !.ensureBufferSize() {
				if .err == nil {
					.err = ErrUnsupported
				}
				return 0, .err
			}
			 := .buf[:]
			if !.readFull(, false) {
				return 0, .err
			}
			 := uint32([0]) | uint32([1])<<8 | uint32([2])<<16 | uint32([3])<<24
			 = [checksumSize:]

			,  := DecodedLen()
			if  != nil {
				.err = 
				return 0, .err
			}
			if .snappyFrame &&  > maxSnappyBlockSize {
				.err = ErrCorrupt
				return 0, .err
			}

			if  > len(.decoded) {
				if  > .maxBlock {
					.err = ErrCorrupt
					return 0, .err
				}
				.decoded = make([]byte, )
			}
			if ,  := Decode(.decoded, );  != nil {
				.err = 
				return 0, .err
			}
			if !.ignoreCRC && crc(.decoded[:]) !=  {
				.err = ErrCRC
				return 0, .err
			}
			.i, .j = 0, 
			continue

		case chunkTypeUncompressedData:
			.blockStart += int64(.j)
			// Section 4.3. Uncompressed data (chunk type 0x01).
			if  < checksumSize {
				.err = ErrCorrupt
				return 0, .err
			}
			if !.ensureBufferSize() {
				if .err == nil {
					.err = ErrUnsupported
				}
				return 0, .err
			}
			 := .buf[:checksumSize]
			if !.readFull(, false) {
				return 0, .err
			}
			 := uint32([0]) | uint32([1])<<8 | uint32([2])<<16 | uint32([3])<<24
			// Read directly into r.decoded instead of via r.buf.
			 :=  - checksumSize
			if .snappyFrame &&  > maxSnappyBlockSize {
				.err = ErrCorrupt
				return 0, .err
			}
			if  > len(.decoded) {
				if  > .maxBlock {
					.err = ErrCorrupt
					return 0, .err
				}
				.decoded = make([]byte, )
			}
			if !.readFull(.decoded[:], false) {
				return 0, .err
			}
			if !.ignoreCRC && crc(.decoded[:]) !=  {
				.err = ErrCRC
				return 0, .err
			}
			.i, .j = 0, 
			continue

		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if  != len(magicBody) {
				.err = ErrCorrupt
				return 0, .err
			}
			if !.readFull(.buf[:len(magicBody)], false) {
				return 0, .err
			}
			if string(.buf[:len(magicBody)]) != magicBody {
				if string(.buf[:len(magicBody)]) != magicBodySnappy {
					.err = ErrCorrupt
					return 0, .err
				} else {
					.snappyFrame = true
				}
			} else {
				.snappyFrame = false
			}
			continue
		}

		if  <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			// fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
			.err = ErrUnsupported
			return 0, .err
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		if  > maxChunkSize {
			// fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
			.err = ErrUnsupported
			return 0, .err
		}

		// fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
		if !.skippable(.buf, , false, ) {
			return 0, .err
		}
	}
}

// DecodeConcurrent will decode the full stream to w.
// This function should not be combined with reading, seeking or other operations.
// Up to 'concurrent' goroutines will be used.
// If <= 0, runtime.NumCPU will be used.
// On success the number of bytes decompressed nil and is returned.
// This is mainly intended for bigger streams.
func ( *Reader) ( io.Writer,  int) ( int64,  error) {
	if .i > 0 || .j > 0 || .blockStart > 0 {
		return 0, errors.New("DecodeConcurrent called after ")
	}
	if  <= 0 {
		 = runtime.NumCPU()
	}

	// Write to output
	var  sync.Mutex
	var  error
	 := func( error) ( bool) {
		.Lock()
		defer .Unlock()
		if  == nil {
			return  == nil
		}
		if  == nil {
			 = 
		}
		return false
	}
	 := func() ( bool) {
		.Lock()
		 :=  != nil
		.Unlock()
		return 
	}

	var  int64
	 := make(chan []byte, )
	 := make(chan []byte, )
	 := make(chan chan []byte, )
	 := make(chan chan []byte, )
	for  := 0;  < ; ++ {
		 <- make([]byte, 0, .maxBufSize)
		 <- make([]byte, 0, .maxBufSize)
		 <- make(chan []byte, 1)
	}
	// Writer
	var  sync.WaitGroup
	.Add(1)
	go func() {
		defer .Done()
		for  := range  {
			 := <-
			 <- 
			if () ||  == nil {
				if  != nil {
					 <- 
				}
				continue
			}
			if () {
				 <- 
				continue
			}
			,  := .Write()
			 := len()
			 <- 
			if  != nil {
				()
				continue
			}
			if  !=  {
				(io.ErrShortWrite)
				continue
			}
			 += int64()
		}
	}()

	defer func() {
		if .err != nil {
			(.err)
		} else if  != nil {
			()
		}
		close()
		.Wait()
		if  == nil {
			 = 
		}
		 = 
	}()

	// Reader
	for !() {
		if !.readFull(.buf[:4], true) {
			if .err == io.EOF {
				.err = nil
			}
			return 0, .err
		}
		 := .buf[0]
		if !.readHeader {
			if  != chunkTypeStreamIdentifier {
				.err = ErrCorrupt
				return 0, .err
			}
			.readHeader = true
		}
		 := int(.buf[1]) | int(.buf[2])<<8 | int(.buf[3])<<16

		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch  {
		case chunkTypeCompressedData:
			.blockStart += int64(.j)
			// Section 4.2. Compressed data (chunk type 0x00).
			if  < checksumSize {
				.err = ErrCorrupt
				return 0, .err
			}
			if  > .maxBufSize {
				.err = ErrCorrupt
				return 0, .err
			}
			 := <-
			 := [:]

			if !.readFull(, false) {
				return 0, .err
			}

			 := uint32([0]) | uint32([1])<<8 | uint32([2])<<16 | uint32([3])<<24
			 = [checksumSize:]

			,  := DecodedLen()
			if  != nil {
				.err = 
				return 0, .err
			}
			if .snappyFrame &&  > maxSnappyBlockSize {
				.err = ErrCorrupt
				return 0, .err
			}

			if  > .maxBlock {
				.err = ErrCorrupt
				return 0, .err
			}
			.Add(1)

			 := <-
			 := <-
			 <- 
			go func() {
				defer .Done()
				 = [:]
				,  := Decode(, )
				 <- 
				if  != nil {
					 <- 
					()
					 <- nil
					return
				}
				if !.ignoreCRC && crc() !=  {
					 <- 
					(ErrCRC)
					 <- nil
					return
				}
				 <- 
			}()
			continue

		case chunkTypeUncompressedData:

			// Section 4.3. Uncompressed data (chunk type 0x01).
			if  < checksumSize {
				.err = ErrCorrupt
				return 0, .err
			}
			if  > .maxBufSize {
				.err = ErrCorrupt
				return 0, .err
			}
			// Grab write buffer
			 := <-
			 := [:checksumSize]
			if !.readFull(, false) {
				return 0, .err
			}
			 := uint32([0]) | uint32([1])<<8 | uint32([2])<<16 | uint32([3])<<24
			// Read content.
			 :=  - checksumSize

			if .snappyFrame &&  > maxSnappyBlockSize {
				.err = ErrCorrupt
				return 0, .err
			}
			if  > .maxBlock {
				.err = ErrCorrupt
				return 0, .err
			}
			// Read uncompressed
			 = [:]
			if !.readFull(, false) {
				return 0, .err
			}

			if !.ignoreCRC && crc() !=  {
				.err = ErrCRC
				return 0, .err
			}
			 := <-
			 <- 
			 <- 
			continue

		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if  != len(magicBody) {
				.err = ErrCorrupt
				return 0, .err
			}
			if !.readFull(.buf[:len(magicBody)], false) {
				return 0, .err
			}
			if string(.buf[:len(magicBody)]) != magicBody {
				if string(.buf[:len(magicBody)]) != magicBodySnappy {
					.err = ErrCorrupt
					return 0, .err
				} else {
					.snappyFrame = true
				}
			} else {
				.snappyFrame = false
			}
			continue
		}

		if  <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			// fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
			.err = ErrUnsupported
			return 0, .err
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		if  > maxChunkSize {
			// fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
			.err = ErrUnsupported
			return 0, .err
		}

		// fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
		if !.skippable(.buf, , false, ) {
			return 0, .err
		}
	}
	return 0, .err
}

// Skip will skip n bytes forward in the decompressed output.
// For larger skips this consumes less CPU and is faster than reading output and discarding it.
// CRC is not checked on skipped blocks.
// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped.
// If a decoding error is encountered subsequent calls to Read will also fail.
func ( *Reader) ( int64) error {
	if  < 0 {
		return errors.New("attempted negative skip")
	}
	if .err != nil {
		return .err
	}

	for  > 0 {
		if .i < .j {
			// Skip in buffer.
			// decoded[i:j] contains decoded bytes that have not yet been passed on.
			 := int64(.j - .i)
			if  >=  {
				 := int64(.i) + 
				if  > math.MaxInt32 {
					return errors.New("s2: internal overflow in skip")
				}
				.i = int()
				return nil
			}
			 -= int64(.j - .i)
			.i = .j
		}

		// Buffer empty; read blocks until we have content.
		if !.readFull(.buf[:4], true) {
			if .err == io.EOF {
				.err = io.ErrUnexpectedEOF
			}
			return .err
		}
		 := .buf[0]
		if !.readHeader {
			if  != chunkTypeStreamIdentifier {
				.err = ErrCorrupt
				return .err
			}
			.readHeader = true
		}
		 := int(.buf[1]) | int(.buf[2])<<8 | int(.buf[3])<<16

		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch  {
		case chunkTypeCompressedData:
			.blockStart += int64(.j)
			// Section 4.2. Compressed data (chunk type 0x00).
			if  < checksumSize {
				.err = ErrCorrupt
				return .err
			}
			if !.ensureBufferSize() {
				if .err == nil {
					.err = ErrUnsupported
				}
				return .err
			}
			 := .buf[:]
			if !.readFull(, false) {
				return .err
			}
			 := uint32([0]) | uint32([1])<<8 | uint32([2])<<16 | uint32([3])<<24
			 = [checksumSize:]

			,  := DecodedLen()
			if  != nil {
				.err = 
				return .err
			}
			if  > .maxBlock {
				.err = ErrCorrupt
				return .err
			}
			// Check if destination is within this block
			if int64() >  {
				if len(.decoded) <  {
					.decoded = make([]byte, )
				}
				if ,  := Decode(.decoded, );  != nil {
					.err = 
					return .err
				}
				if crc(.decoded[:]) !=  {
					.err = ErrCorrupt
					return .err
				}
			} else {
				// Skip block completely
				 -= int64()
				.blockStart += int64()
				 = 0
			}
			.i, .j = 0, 
			continue
		case chunkTypeUncompressedData:
			.blockStart += int64(.j)
			// Section 4.3. Uncompressed data (chunk type 0x01).
			if  < checksumSize {
				.err = ErrCorrupt
				return .err
			}
			if !.ensureBufferSize() {
				if .err != nil {
					.err = ErrUnsupported
				}
				return .err
			}
			 := .buf[:checksumSize]
			if !.readFull(, false) {
				return .err
			}
			 := uint32([0]) | uint32([1])<<8 | uint32([2])<<16 | uint32([3])<<24
			// Read directly into r.decoded instead of via r.buf.
			 :=  - checksumSize
			if  > len(.decoded) {
				if  > .maxBlock {
					.err = ErrCorrupt
					return .err
				}
				.decoded = make([]byte, )
			}
			if !.readFull(.decoded[:], false) {
				return .err
			}
			if int64() <  {
				if crc(.decoded[:]) !=  {
					.err = ErrCorrupt
					return .err
				}
			}
			.i, .j = 0, 
			continue
		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if  != len(magicBody) {
				.err = ErrCorrupt
				return .err
			}
			if !.readFull(.buf[:len(magicBody)], false) {
				return .err
			}
			if string(.buf[:len(magicBody)]) != magicBody {
				if string(.buf[:len(magicBody)]) != magicBodySnappy {
					.err = ErrCorrupt
					return .err
				}
			}

			continue
		}

		if  <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			.err = ErrUnsupported
			return .err
		}
		if  > maxChunkSize {
			.err = ErrUnsupported
			return .err
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		if !.skippable(.buf, , false, ) {
			return .err
		}
	}
	return nil
}

// ReadSeeker provides random or forward seeking in compressed content.
// See Reader.ReadSeeker
type ReadSeeker struct {
	*Reader
	// readAtMu serializes ReadAt calls, which share the embedded Reader state.
	readAtMu sync.Mutex
}

// ReadSeeker will return an io.ReadSeeker and io.ReaderAt
// compatible version of the reader.
// If 'random' is specified the returned io.Seeker can be used for
// random seeking, otherwise only forward seeking is supported.
// Enabling random seeking requires the original input to support
// the io.Seeker interface.
// A custom index can be specified which will be used if supplied.
// When using a custom index, it will not be read from the input stream.
// The ReadAt position will affect regular reads and the current position of Seek.
// So using Read after ReadAt will continue from where the ReadAt stopped.
// No functions should be used concurrently.
// The returned ReadSeeker contains a shallow reference to the existing Reader,
// meaning changes performed to one is reflected in the other.
func ( *Reader) ( bool,  []byte) (*ReadSeeker, error) {
	// Read index if provided.
	if len() != 0 {
		if .index == nil {
			.index = &Index{}
		}
		if ,  := .index.Load();  != nil {
			return nil, ErrCantSeek{Reason: "loading index returned: " + .Error()}
		}
	}

	// Check if input is seekable
	,  := .r.(io.ReadSeeker)
	if ! {
		if ! {
			return &ReadSeeker{Reader: }, nil
		}
		return nil, ErrCantSeek{Reason: "input stream isn't seekable"}
	}

	if .index != nil {
		// Seekable and index, ok...
		return &ReadSeeker{Reader: }, nil
	}

	// Load from stream.
	.index = &Index{}

	// Read current position.
	,  := .Seek(0, io.SeekCurrent)
	if  != nil {
		return nil, ErrCantSeek{Reason: "seeking input returned: " + .Error()}
	}
	 = .index.LoadStream()
	if  != nil {
		if  == ErrUnsupported {
			// If we don't require random seeking, reset input and return.
			if ! {
				_,  = .Seek(, io.SeekStart)
				if  != nil {
					return nil, ErrCantSeek{Reason: "resetting stream returned: " + .Error()}
				}
				.index = nil
				return &ReadSeeker{Reader: }, nil
			}
			return nil, ErrCantSeek{Reason: "input stream does not contain an index"}
		}
		return nil, ErrCantSeek{Reason: "reading index returned: " + .Error()}
	}

	// reset position.
	_,  = .Seek(, io.SeekStart)
	if  != nil {
		return nil, ErrCantSeek{Reason: "seeking input returned: " + .Error()}
	}
	return &ReadSeeker{Reader: }, nil
}

// Seek allows seeking in compressed data.
func ( *ReadSeeker) ( int64,  int) (int64, error) {
	if .err != nil {
		if !errors.Is(.err, io.EOF) {
			return 0, .err
		}
		// Reset on EOF
		.err = nil
	}

	// Calculate absolute offset.
	 := 

	switch  {
	case io.SeekStart:
	case io.SeekCurrent:
		 = .blockStart + int64(.i) + 
	case io.SeekEnd:
		if .index == nil {
			return 0, ErrUnsupported
		}
		 = .index.TotalUncompressed + 
	default:
		.err = ErrUnsupported
		return 0, .err
	}

	if  < 0 {
		return 0, errors.New("seek before start of file")
	}

	if !.readHeader {
		// Make sure we read the header.
		_, .err = .Read([]byte{})
		if .err != nil {
			return 0, .err
		}
	}

	// If we are inside current block no need to seek.
	// This includes no offset changes.
	if  >= .blockStart &&  < .blockStart+int64(.j) {
		.i = int( - .blockStart)
		return .blockStart + int64(.i), nil
	}

	,  := .r.(io.ReadSeeker)
	if .index == nil || ! {
		 := .blockStart + int64(.i)
		if  >=  {
			 := .Skip( - )
			return .blockStart + int64(.i), 
		}
		return 0, ErrUnsupported
	}

	// We can seek and we have an index.
	, ,  := .index.Find()
	if  != nil {
		return .blockStart + int64(.i), 
	}

	// Seek to next block
	_,  = .Seek(, io.SeekStart)
	if  != nil {
		return 0, 
	}

	.i = .j                     // Remove rest of current block.
	.blockStart =  - int64(.j) // Adjust current block start for accounting.
	if  <  {
		// Forward inside block
		return , .Skip( - )
	}
	if  >  {
		return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", , )
	}
	return , nil
}

// ReadAt reads len(p) bytes into p starting at offset off in the
// underlying input source. It returns the number of bytes
// read (0 <= n <= len(p)) and any error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error
// explaining why more bytes were not returned. In this respect,
// ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the
// input source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset,
// ReadAt should not affect nor be affected by the underlying
// seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the
// same input source. This is however not recommended.
func ( *ReadSeeker) ( []byte,  int64) (int, error) {
	.readAtMu.Lock()
	defer .readAtMu.Unlock()
	,  := .Seek(, io.SeekStart)
	if  != nil {
		return 0, 
	}
	 := 0
	for  < len() {
		,  := .Read([:])
		if  != nil {
			// This will include io.EOF
			return  + , 
		}
		 += 
	}
	return , nil
}

// ReadByte satisfies the io.ByteReader interface.
func ( *Reader) () (byte, error) {
	if .err != nil {
		return 0, .err
	}
	if .i < .j {
		 := .decoded[.i]
		.i++
		return , nil
	}
	var  [1]byte
	for  := 0;  < 10; ++ {
		,  := .Read([:])
		if  != nil {
			return 0, 
		}
		if  == 1 {
			return [0], nil
		}
	}
	return 0, io.ErrNoProgress
}

// SkippableCB will register a callback for chunks with the specified ID.
// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive).
// For each chunk with the ID, the callback is called with the content.
// Any returned non-nil error will abort decompression.
// Only one callback per ID is supported, latest sent will be used.
// Sending a nil function will disable previous callbacks.
// You can peek the stream, triggering the callback, by doing a Read with a 0
// byte buffer.
func ( *Reader) ( uint8,  func( io.Reader) error) error {
	if  < 0x80 ||  >= chunkTypePadding {
		return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)")
	}
	.skippableCB[-0x80] = 
	return nil
}