package zstd

import (
	"fmt"
	"math/bits"

	"github.com/klauspost/compress/internal/xxhash"
)

const (
	dictShardBits = 6
)

type fastBase struct {
	// cur is the offset at the start of hist
	cur int32
	// maximum offset. Should be at least 2x block size.
	maxMatchOff int32
	// bufferReset is the offset at which cur must be reset
	// to avoid overflowing the int32 position space.
	bufferReset int32
	// hist holds the history window followed by the current input.
	hist []byte
	// crc accumulates the frame checksum (xxhash64) of the uncompressed input.
	crc *xxhash.Digest
	// tmp is scratch space for emitting the checksum.
	tmp [8]byte
	// blk is the block currently being built.
	blk *blockEnc
	// lastDictID is the ID of the most recently loaded dictionary.
	lastDictID uint32
	// lowMem trades some speed for smaller buffers.
	lowMem bool
}

// CRC returns the underlying CRC writer.
func (e *fastBase) CRC() *xxhash.Digest {
	return e.crc
}

// AppendCRC will append the CRC to the destination slice and return it.
func (e *fastBase) AppendCRC(dst []byte) []byte {
	crc := e.crc.Sum(e.tmp[:0])
	dst = append(dst, crc[7], crc[6], crc[5], crc[4])
	return dst
}
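
// Illustrative only, not used by the encoder: the frame checksum defined by
// the zstd format is the low 32 bits of the xxhash64 digest, stored
// little-endian. AppendCRC above achieves that by picking bytes 7..4 of the
// big-endian Sum output; a hedged, equivalent sketch using Sum64 directly:
func appendCRCSketch(crc *xxhash.Digest, dst []byte) []byte {
	v := uint32(crc.Sum64()) // low 32 bits of the 64-bit digest
	return append(dst, byte(v), byte(v>>8), byte(v>>16), byte(v>>24))
}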

// WindowSize returns the window size of the encoder,
// or a window size small enough to contain the input size, if > 0.
func (e *fastBase) WindowSize(size int64) int32 {
	if size > 0 && size < int64(e.maxMatchOff) {
		b := int32(1) << uint(bits.Len(uint(size)))
		// Keep minimum window.
		if b < 1024 {
			b = 1024
		}
		return b
	}
	return e.maxMatchOff
}
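
// For illustration (added commentary): bits.Len returns the bit length of
// the size, so 1<<bits.Len(n) is the smallest power of two strictly greater
// than n. A 1500-byte input therefore gets a 2048-byte window, while very
// small inputs are clamped to the 1024-byte minimum above.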

// Block returns the current block.
func (e *fastBase) Block() *blockEnc {
	return e.blk
}

func (e *fastBase) addBlock(src []byte) int32 {
	if debugAsserts && e.cur > e.bufferReset {
		panic(fmt.Sprintf("e.cur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
	}
	// check if we have space already
	if len(e.hist)+len(src) > cap(e.hist) {
		if cap(e.hist) == 0 {
			e.ensureHist(len(src))
		} else {
			if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
				panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
			}
			// Move down
			offset := int32(len(e.hist)) - e.maxMatchOff
			copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
			e.cur += offset
			e.hist = e.hist[:e.maxMatchOff]
		}
	}
	s := int32(len(e.hist))
	e.hist = append(e.hist, src...)
	return s
}
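
// A note on the sliding window above (added commentary): absolute positions
// are always e.cur plus an index into e.hist. When the buffer would
// overflow, only the last maxMatchOff bytes are kept and moved to the
// front, and e.cur is advanced by the number of bytes discarded, so
// previously recorded absolute positions remain valid under the new layout.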

// ensureHist will ensure that history can keep at least this many bytes.
func (e *fastBase) ensureHist(n int) {
	if cap(e.hist) >= n {
		return
	}
	l := e.maxMatchOff
	if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
		l += maxCompressedBlockSize
	} else {
		l += e.maxMatchOff
	}
	// Make it at least 1MB.
	if l < 1<<20 && !e.lowMem {
		l = 1 << 20
	}
	// Make it at least the requested size.
	if l < int32(n) {
		l = int32(n)
	}
	e.hist = make([]byte, 0, l)
}
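
// Sizing note (added commentary): for example, with an 8MB window and
// lowMem off, the branch above reserves window+window = 16MB of capacity;
// with lowMem on it reserves window+maxCompressedBlockSize instead, and the
// 1MB floor only applies when lowMem is off.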

// UseBlock will replace the block with the provided one,
// but transfer recent offsets from the previous.
func (e *fastBase) UseBlock(enc *blockEnc) {
	enc.reset(e.blk)
	e.blk = enc
}

func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
	if debugAsserts {
		if s < 0 {
			err := fmt.Sprintf("s (%d) < 0", s)
			panic(err)
		}
		if t < 0 {
			err := fmt.Sprintf("t (%d) < 0", t)
			panic(err)
		}
		if s-t > e.maxMatchOff {
			err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
			panic(err)
		}
		if len(src)-int(s) > maxCompressedBlockSize {
			panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
		}
	}
	return int32(matchLen(src[s:], src[t:]))
}
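
// matchLenSketch is a hedged, scalar reference (not called anywhere) for
// what the package's optimized matchLen computes: the length of the common
// prefix of a and b. matchlen above only adds debug bounds checks before
// slicing into src.
func matchLenSketch(a, b []byte) int32 {
	var n int32
	for int(n) < len(a) && int(n) < len(b) && a[n] == b[n] {
		n++
	}
	return n
}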

// Reset the encoding table.
func (e *fastBase) resetBase(d *dict, singleBlock bool) {
	if e.blk == nil {
		e.blk = &blockEnc{lowMem: e.lowMem}
		e.blk.init()
	} else {
		e.blk.reset(nil)
	}
	e.blk.initNewEncode()
	if e.crc == nil {
		e.crc = xxhash.New()
	} else {
		e.crc.Reset()
	}
	e.blk.dictLitEnc = nil
	if d != nil {
		low := e.lowMem
		if singleBlock {
			e.lowMem = true
		}
		e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
		e.lowMem = low
	}

	// We offset current position so everything will be out of reach.
	// If above reset line, history will be purged.
	if e.cur < e.bufferReset {
		e.cur += e.maxMatchOff + int32(len(e.hist))
	}
	e.hist = e.hist[:0]
	if d != nil {
		// Set offsets (currently not used)
		for i, off := range d.offsets {
			e.blk.recentOffsets[i] = uint32(off)
			e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
		}
		// Transfer litenc.
		e.blk.dictLitEnc = d.litEnc
		e.hist = append(e.hist, d.content...)
	}
}
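
// fastBaseUsageSketch is a hedged, illustrative sketch (not part of the
// real encode path) of how the helpers above fit together, roughly the
// calls the frame encoder and a concrete match-finding encoder make. It
// assumes e has been configured (maxMatchOff, bufferReset, lowMem set) by
// the concrete encoder embedding fastBase.
func fastBaseUsageSketch(e *fastBase, src []byte) []byte {
	e.resetBase(nil, false)   // fresh state, no dictionary
	_, _ = e.CRC().Write(src) // the frame checksum covers the raw input
	s := e.addBlock(src)      // src now lives in e.hist starting at index s
	_ = s                     // a real encoder searches e.hist for matches here
	blk := e.Block()          // sequences and literals are emitted into blk
	_ = blk
	return e.AppendCRC(nil) // 4-byte little-endian frame checksum
}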