// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

import (
	"errors"
	"fmt"
	"math"
	"math/bits"
	"slices"

	"github.com/klauspost/compress/huff0"
)

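// blockEnc encodes a single block, holding the literals, sequences, and the
// entropy-coder state that is carried between blocks of the same stream.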
type blockEnc struct {
	size       int
	literals   []byte
	sequences  []seq
	coders     seqCoders
	litEnc     *huff0.Scratch
	dictLitEnc *huff0.Scratch
	wr         bitWriter

	extraLits         int
	output            []byte
	recentOffsets     [3]uint32
	prevRecentOffsets [3]uint32

	last   bool
	lowMem bool
}

// init should be used once the block has been created.
// If called more than once, the effect is the same as calling reset.
func (b *blockEnc) init() {
	if b.lowMem {
		// 1K literals
		if cap(b.literals) < 1<<10 {
			b.literals = make([]byte, 0, 1<<10)
		}
		const defSeqs = 20
		if cap(b.sequences) < defSeqs {
			b.sequences = make([]seq, 0, defSeqs)
		}
		// 1K output
		if cap(b.output) < 1<<10 {
			b.output = make([]byte, 0, 1<<10)
		}
	} else {
		if cap(b.literals) < maxCompressedBlockSize {
			b.literals = make([]byte, 0, maxCompressedBlockSize)
		}
		const defSeqs = 2000
		if cap(b.sequences) < defSeqs {
			b.sequences = make([]seq, 0, defSeqs)
		}
		if cap(b.output) < maxCompressedBlockSize {
			b.output = make([]byte, 0, maxCompressedBlockSize)
		}
	}

	if b.coders.mlEnc == nil {
		b.coders.mlEnc = &fseEncoder{}
		b.coders.mlPrev = &fseEncoder{}
		b.coders.ofEnc = &fseEncoder{}
		b.coders.ofPrev = &fseEncoder{}
		b.coders.llEnc = &fseEncoder{}
		b.coders.llPrev = &fseEncoder{}
	}
	b.litEnc = &huff0.Scratch{WantLogLess: 4}
	b.reset(nil)
}

// initNewEncode can be used to reset offsets and encoders to the initial state.
func (b *blockEnc) initNewEncode() {
	b.recentOffsets = [3]uint32{1, 4, 8}
	b.litEnc.Reuse = huff0.ReusePolicyNone
	b.coders.setPrev(nil, nil, nil)
}

// reset will reset the block for a new encode, but in the same stream,
// meaning that state will be carried over, but the block content is reset.
// If a previous block is provided, the recent offsets are carried over.
func (b *blockEnc) reset(prev *blockEnc) {
	b.extraLits = 0
	b.literals = b.literals[:0]
	b.size = 0
	b.sequences = b.sequences[:0]
	b.output = b.output[:0]
	b.last = false
	if prev != nil {
		b.recentOffsets = prev.prevRecentOffsets
	}
	b.dictLitEnc = nil
}

// swapEncoders will swap the entropy encoders with those of another block,
// so the histograms gathered here can serve as the "previous" state next time.
func (b *blockEnc) swapEncoders(prev *blockEnc) {
	b.coders.swap(&prev.coders)
	b.litEnc, prev.litEnc = prev.litEnc, b.litEnc
}

// blockHeader contains the information for a block header.
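// It packs the 3-byte on-wire header (RFC 8878):
//
//	bit   0:    Last_Block flag
//	bits  1-2:  Block_Type
//	bits  3-23: Block_Size
//
// For example, a last, raw block of 11 bytes is 1 | 0<<1 | 11<<3 = 0x59,
// emitted little-endian as 0x59 0x00 0x00.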
type blockHeader uint32

// setLast sets the 'last' indicator on a block.
func (h *blockHeader) setLast(b bool) {
	if b {
		*h = *h | 1
	} else {
		const mask = (1 << 24) - 2
		*h = *h & mask
	}
}

// setSize will store the compressed size of a block.
func (h *blockHeader) setSize(v uint32) {
	const mask = 7
	*h = (*h)&mask | blockHeader(v<<3)
}

// setType sets the block type.
func (h *blockHeader) setType(t blockType) {
	const mask = 1 | (((1 << 24) - 1) ^ 7)
	*h = (*h & mask) | blockHeader(t<<1)
}

// appendTo will append the block header to a slice.
func (h blockHeader) appendTo(b []byte) []byte {
	return append(b, uint8(h), uint8(h>>8), uint8(h>>16))
}

// String returns a string representation of the block.
func (h blockHeader) String() string {
	return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1)
}

// literalsHeader contains literals header information.
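// The low bits mirror the on-wire layout (type in bits 0-1, size format in
// bits 2-3, sizes above that), while bits 60-63 hold the number of header
// bytes that appendTo must emit.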
type literalsHeader uint64

// setType can be used to set the type of literal block.
func (h *literalsHeader) setType(t literalsBlockType) {
	const mask = math.MaxUint64 - 3
	*h = (*h & mask) | literalsHeader(t)
}

// setSize can be used to set a single size, for uncompressed and RLE content.
func (h *literalsHeader) setSize(regenLen int) {
	inBits := bits.Len32(uint32(regenLen))
	// Only retain 2 bits
	const mask = 3
	lh := uint64(*h & mask)
	switch {
	case inBits < 5:
		lh |= (uint64(regenLen) << 3) | (1 << 60)
		if debugEncoder {
			got := int(lh>>3) & 0xff
			if got != regenLen {
				panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)"))
			}
		}
	case inBits < 12:
		lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60)
	case inBits < 20:
		lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60)
	default:
		panic(fmt.Errorf("internal error: block too big (%d)", regenLen))
	}
	*h = literalsHeader(lh)
}

// setSizes will set the size of a compressed literals section and the input length.
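// The three size formats give 10, 14, or 18 bits per size field; only the
// smallest may carry a single-stream literals section, hence the panics below.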
func (h *literalsHeader) setSizes(compLen, inLen int, single bool) {
	compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen))
	// Only retain 2 bits
	const mask = 3
	lh := uint64(*h & mask)
	switch {
	case compBits <= 10 && inBits <= 10:
		if !single {
			lh |= 1 << 2
		}
		lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
		if debugEncoder {
			const mmask = (1 << 24) - 1
			n := (lh >> 4) & mmask
			if int(n&1023) != inLen {
				panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits))
			}
			if int(n>>10) != compLen {
				panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits))
			}
		}
	case compBits <= 14 && inBits <= 14:
		lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60)
		if single {
			panic("single stream used with more than 10 bits length.")
		}
	case compBits <= 18 && inBits <= 18:
		lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60)
		if single {
			panic("single stream used with more than 10 bits length.")
		}
	default:
		panic("internal error: block too big")
	}
	*h = literalsHeader(lh)
}

// appendTo will append the literals header to a byte slice.
func (h literalsHeader) appendTo(b []byte) []byte {
	size := uint8(h >> 60)
	switch size {
	case 1:
		b = append(b, uint8(h))
	case 2:
		b = append(b, uint8(h), uint8(h>>8))
	case 3:
		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16))
	case 4:
		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24))
	case 5:
		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32))
	default:
		panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size))
	}
	return b
}

// size returns the output size with currently set values.
func (h literalsHeader) size() int {
	return int(h >> 60)
}

func (h literalsHeader) String() string {
	return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%x, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60)
}

// pushOffsets will push the recent offsets to the backup store.
func (b *blockEnc) pushOffsets() {
	b.prevRecentOffsets = b.recentOffsets
}

// popOffsets will restore the recent offsets from the backup store.
func (b *blockEnc) popOffsets() {
	b.recentOffsets = b.prevRecentOffsets
}

// matchOffset will adjust recent offsets and return the adjusted one,
// if it matches a previous offset.
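// Codes 1-3 are repeat offsets; real offsets are emitted as offset+3.
// When the sequence has no literals (lits == 0), the repeat codes shift:
// code 1 refers to recentOffsets[1] and code 3 to recentOffsets[0]-1.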
func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {
	// Check if offset is one of the recent offsets.
	// Adjusts the output offset accordingly.
	// Gives a tiny bit of compression, typically around 1%.
	if lits > 0 {
		switch offset {
		case b.recentOffsets[0]:
			offset = 1
		case b.recentOffsets[1]:
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 2
		case b.recentOffsets[2]:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 3
		default:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset += 3
		}
	} else {
		switch offset {
		case b.recentOffsets[1]:
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 1
		case b.recentOffsets[2]:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 2
		case b.recentOffsets[0] - 1:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 3
		default:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset += 3
		}
	}
	return offset
}

// encodeRaw can be used to set the output to a raw representation of supplied bytes.
func ( *blockEnc) ( []byte) {
	var  blockHeader
	.setLast(.last)
	.setSize(uint32(len()))
	.setType(blockTypeRaw)
	.output = .appendTo(.output[:0])
	.output = append(.output, ...)
	if debugEncoder {
		println("Adding RAW block, length", len(), "last:", .last)
	}
}

// encodeRawTo will write the raw representation of src as a block to dst.
func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
	var bh blockHeader
	bh.setLast(b.last)
	bh.setSize(uint32(len(src)))
	bh.setType(blockTypeRaw)
	dst = bh.appendTo(dst)
	dst = append(dst, src...)
	if debugEncoder {
		println("Adding RAW block, length", len(src), "last:", b.last)
	}
	return dst
}

// encodeLits can be used if the block contains only literals (no sequences).
func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
	var bh blockHeader
	bh.setLast(b.last)
	bh.setSize(uint32(len(lits)))

	// Don't compress extremely small blocks
	if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw {
		if debugEncoder {
			println("Adding RAW block, length", len(lits), "last:", b.last)
		}
		bh.setType(blockTypeRaw)
		b.output = bh.appendTo(b.output)
		b.output = append(b.output, lits...)
		return nil
	}

	var (
		out            []byte
		reUsed, single bool
		err            error
	)
	if b.dictLitEnc != nil {
		b.litEnc.TransferCTable(b.dictLitEnc)
		b.litEnc.Reuse = huff0.ReusePolicyAllow
		b.dictLitEnc = nil
	}
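	// The 4-stream huff0 format prepends a 6-byte jump table, which rarely
	// pays off below ~1KiB of literals, so a single stream is used there.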
	if len(lits) >= 1024 {
		// Use 4 Streams.
		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
	} else if len(lits) > 16 {
		// Use 1 stream
		single = true
		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
	} else {
		err = huff0.ErrIncompressible
	}
	if err == nil && len(out)+5 > len(lits) {
		// If we are close, we may still be worse or equal to raw.
		var lh literalsHeader
		lh.setSizes(len(out), len(lits), single)
		if len(out)+lh.size() >= len(lits) {
			err = huff0.ErrIncompressible
		}
	}
	switch err {
	case huff0.ErrIncompressible:
		if debugEncoder {
			println("Adding RAW block, length", len(lits), "last:", b.last)
		}
		bh.setType(blockTypeRaw)
		b.output = bh.appendTo(b.output)
		b.output = append(b.output, lits...)
		return nil
	case huff0.ErrUseRLE:
		if debugEncoder {
			println("Adding RLE block, length", len(lits))
		}
		bh.setType(blockTypeRLE)
		b.output = bh.appendTo(b.output)
		b.output = append(b.output, lits[0])
		return nil
	case nil:
	default:
		return err
	}
	// Compressed...
	// Now, allow reuse
	b.litEnc.Reuse = huff0.ReusePolicyAllow
	bh.setType(blockTypeCompressed)
	var lh literalsHeader
	if reUsed {
		if debugEncoder {
			println("Reused tree, compressed to", len(out))
		}
		lh.setType(literalsBlockTreeless)
	} else {
		if debugEncoder {
			println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable))
		}
		lh.setType(literalsBlockCompressed)
	}
	// Set sizes
	lh.setSizes(len(out), len(lits), single)
	bh.setSize(uint32(len(out) + lh.size() + 1))

	// Write block headers.
	b.output = bh.appendTo(b.output)
	b.output = lh.appendTo(b.output)
	// Add compressed data.
	b.output = append(b.output, out...)
	// No sequences.
	b.output = append(b.output, 0)
	return nil
}

// encodeRLE will encode an RLE block.
func (b *blockEnc) encodeRLE(val byte, length uint32) {
	var bh blockHeader
	bh.setLast(b.last)
	bh.setSize(length)
	bh.setType(blockTypeRLE)
	b.output = bh.appendTo(b.output)
	b.output = append(b.output, val)
}

// fuzzFseEncoder can be used to fuzz the FSE encoder.
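// It returns 1 when the input exercised the encoder and 0 when it was
// skipped, following the go-fuzz priority convention.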
func fuzzFseEncoder(data []byte) int {
	if len(data) > maxSequences || len(data) < 2 {
		return 0
	}
	enc := fseEncoder{}
	hist := enc.Histogram()
	maxSym := uint8(0)
	for i, v := range data {
		v = v & 63
		data[i] = v
		hist[v]++
		if v > maxSym {
			maxSym = v
		}
	}
	if maxSym == 0 {
		// All 0
		return 0
	}
	cnt := int(slices.Max(hist[:maxSym]))
	if cnt == len(data) {
		// RLE
		return 0
	}
	enc.HistogramFinished(maxSym, cnt)
	err := enc.normalizeCount(len(data))
	if err != nil {
		return 0
	}
	_, err = enc.writeCount(nil)
	if err != nil {
		panic(err)
	}
	return 1
}

// encode will encode the block and append the output in b.output.
// Previous offset codes must be pushed if more blocks are expected.
func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
	if len(b.sequences) == 0 {
		return b.encodeLits(b.literals, rawAllLits)
	}
	if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 {
		// Check common RLE cases.
		seq := b.sequences[0]
		if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 {
			// Offset == 1 and 0 or 1 literals.
			b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen)
			return nil
		}
	}

	// We want some difference to at least account for the headers.
	saved := b.size - len(b.literals) - (b.size >> 6)
	if saved < 16 {
		if org == nil {
			return errIncompressible
		}
		b.popOffsets()
		return b.encodeLits(org, rawAllLits)
	}

	var bh blockHeader
	var lh literalsHeader
	bh.setLast(b.last)
	bh.setType(blockTypeCompressed)
	// Store offset of the block header. Needed when we know the size.
	bhOffset := len(b.output)
	b.output = bh.appendTo(b.output)

	var (
		out            []byte
		reUsed, single bool
		err            error
	)
	if b.dictLitEnc != nil {
		b.litEnc.TransferCTable(b.dictLitEnc)
		b.litEnc.Reuse = huff0.ReusePolicyAllow
		b.dictLitEnc = nil
	}
	if len(b.literals) >= 1024 && !raw {
		// Use 4 Streams.
		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
	} else if len(b.literals) > 16 && !raw {
		// Use 1 stream
		single = true
		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
	} else {
		err = huff0.ErrIncompressible
	}

	if err == nil && len(out)+5 > len(b.literals) {
		// If we are close, we may still be worse or equal to raw.
		var lh literalsHeader
		lh.setSize(len(b.literals))
		szRaw := lh.size()
		lh.setSizes(len(out), len(b.literals), single)
		szComp := lh.size()
		if len(out)+szComp >= len(b.literals)+szRaw {
			err = huff0.ErrIncompressible
		}
	}
	switch err {
	case huff0.ErrIncompressible:
		lh.setType(literalsBlockRaw)
		lh.setSize(len(b.literals))
		b.output = lh.appendTo(b.output)
		b.output = append(b.output, b.literals...)
		if debugEncoder {
			println("Adding literals RAW, length", len(b.literals))
		}
	case huff0.ErrUseRLE:
		lh.setType(literalsBlockRLE)
		lh.setSize(len(b.literals))
		b.output = lh.appendTo(b.output)
		b.output = append(b.output, b.literals[0])
		if debugEncoder {
			println("Adding literals RLE")
		}
	case nil:
		// Compressed litLen...
		if reUsed {
			if debugEncoder {
				println("reused tree")
			}
			lh.setType(literalsBlockTreeless)
		} else {
			if debugEncoder {
				println("new tree, size:", len(b.litEnc.OutTable))
			}
			lh.setType(literalsBlockCompressed)
			if debugEncoder {
				_, _, err := huff0.ReadTable(out, nil)
				if err != nil {
					panic(err)
				}
			}
		}
		lh.setSizes(len(out), len(b.literals), single)
		if debugEncoder {
			printf("Compressed %d literals to %d bytes", len(b.literals), len(out))
			println("Adding literal header:", lh)
		}
		b.output = lh.appendTo(b.output)
		b.output = append(b.output, out...)
		b.litEnc.Reuse = huff0.ReusePolicyAllow
		if debugEncoder {
			println("Adding literals compressed")
		}
	default:
		if debugEncoder {
			println("Adding literals ERROR:", err)
		}
		return err
	}
	// Sequence compression

	// Write the number of sequences
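	// (One byte for counts below 128; two bytes with the high bit set below
	// 0x7f00; otherwise a 0xFF marker followed by the 16-bit remainder.)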
	switch {
	case len(b.sequences) < 128:
		b.output = append(b.output, uint8(len(b.sequences)))
	case len(b.sequences) < 0x7f00: // TODO: this could be wrong
		n := len(b.sequences)
		b.output = append(b.output, 128+uint8(n>>8), uint8(n))
	default:
		n := len(b.sequences) - 0x7f00
		b.output = append(b.output, 255, uint8(n), uint8(n>>8))
	}
	if debugEncoder {
		println("Encoding", len(b.sequences), "sequences")
	}
	b.genCodes()
	llEnc := b.coders.llEnc
	ofEnc := b.coders.ofEnc
	mlEnc := b.coders.mlEnc
	err = llEnc.normalizeCount(len(b.sequences))
	if err != nil {
		return err
	}
	err = ofEnc.normalizeCount(len(b.sequences))
	if err != nil {
		return err
	}
	err = mlEnc.normalizeCount(len(b.sequences))
	if err != nil {
		return err
	}

	// Choose the best compression mode for each type.
	// Will evaluate the new vs predefined and previous.
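	// (Predefined mode uses the spec's default distributions, Repeat mode
	// reuses the previous block's table, and FSE mode emits a new table.)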
	chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) {
		// See if predefined/previous is better
		hist := cur.count[:cur.symbolLen]
		nSize := cur.approxSize(hist) + cur.maxHeaderSize()
		predefSize := preDef.approxSize(hist)
		prevSize := prev.approxSize(hist)

		// Add a small penalty for new encoders.
		// Don't bother with extremely small (<2 byte gains).
		nSize = nSize + (nSize+2*8*16)>>4
		switch {
		case predefSize <= prevSize && predefSize <= nSize || forcePreDef:
			if debugEncoder {
				println("Using predefined", predefSize>>3, "<=", nSize>>3)
			}
			return preDef, compModePredefined
		case prevSize <= nSize:
			if debugEncoder {
				println("Using previous", prevSize>>3, "<=", nSize>>3)
			}
			return prev, compModeRepeat
		default:
			if debugEncoder {
				println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes")
				println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen])
			}
			return cur, compModeFSE
		}
	}

	// Write compression mode
	var mode uint8
	if llEnc.useRLE {
		mode |= uint8(compModeRLE) << 6
		llEnc.setRLE(b.sequences[0].llCode)
		if debugEncoder {
			println("llEnc.useRLE")
		}
	} else {
		var m seqCompMode
		llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths])
		mode |= uint8(m) << 6
	}
	if ofEnc.useRLE {
		mode |= uint8(compModeRLE) << 4
		ofEnc.setRLE(b.sequences[0].ofCode)
		if debugEncoder {
			println("ofEnc.useRLE")
		}
	} else {
		var m seqCompMode
		ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets])
		mode |= uint8(m) << 4
	}

	if mlEnc.useRLE {
		mode |= uint8(compModeRLE) << 2
		mlEnc.setRLE(b.sequences[0].mlCode)
		if debugEncoder {
			println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen)
		}
	} else {
		var m seqCompMode
		mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths])
		mode |= uint8(m) << 2
	}
	b.output = append(b.output, mode)
	if debugEncoder {
		printf("Compression modes: 0b%b", mode)
	}
	b.output, err = llEnc.writeCount(b.output)
	if err != nil {
		return err
	}
	start := len(b.output)
	b.output, err = ofEnc.writeCount(b.output)
	if err != nil {
		return err
	}
	if false {
		println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount)
		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen)
		for i, v := range ofEnc.norm[:ofEnc.symbolLen] {
			fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v)
		}
	}
	b.output, err = mlEnc.writeCount(b.output)
	if err != nil {
		return err
	}

	// Write the sequence bitstream: three interleaved FSE streams plus the
	// extra (raw) bits of each value.
	wr := &b.wr
	wr.reset(b.output)

	var ll, of, ml cState

	// Current sequence
	seq := len(b.sequences) - 1
	s := b.sequences[seq]
	llEnc.setBits(llBitsTable[:])
	mlEnc.setBits(mlBitsTable[:])
	ofEnc.setBits(nil)

	llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256]

	// We have 3 bounds checks here (and in the loop).
	// Since we are iterating backwards it is kinda hard to avoid.
	llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
	ll.init(wr, &llEnc.ct, llB)
	of.init(wr, &ofEnc.ct, ofB)
	wr.flush32()
	ml.init(wr, &mlEnc.ct, mlB)

	// Each of these lookups also generates a bounds check.
	wr.addBits32NC(s.litLen, llB.outBits)
	wr.addBits32NC(s.matchLen, mlB.outBits)
	wr.flush32()
	wr.addBits32NC(s.offset, ofB.outBits)
	if debugSequences {
		println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
	}
	seq--
	// Store sequences in reverse...
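	// (FSE bitstreams are read back-to-front, so writing the last sequence
	// first lets the decoder emit them in natural order.)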
	for seq >= 0 {
		s = b.sequences[seq]

		ofB := ofTT[s.ofCode]
		wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
		//of.encode(ofB)
		nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
		dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
		wr.addBits16NC(of.state, uint8(nbBitsOut))
		of.state = of.stateTable[dstState]

		// Accumulate extra bits.
		outBits := ofB.outBits & 31
		extraBits := uint64(s.offset & bitMask32[outBits])
		extraBitsN := outBits

		mlB := mlTT[s.mlCode]
		//ml.encode(mlB)
		nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
		dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
		wr.addBits16NC(ml.state, uint8(nbBitsOut))
		ml.state = ml.stateTable[dstState]

		outBits = mlB.outBits & 31
		extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
		extraBitsN += outBits

		llB := llTT[s.llCode]
		//ll.encode(llB)
		nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
		dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
		wr.addBits16NC(ll.state, uint8(nbBitsOut))
		ll.state = ll.stateTable[dstState]

		outBits = llB.outBits & 31
		extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
		extraBitsN += outBits

		wr.flush32()
		wr.addBits64NC(extraBits, extraBitsN)

		if debugSequences {
			println("Encoded seq", seq, s)
		}

		seq--
	}
	ml.flush(mlEnc.actualTableLog)
	of.flush(ofEnc.actualTableLog)
	ll.flush(llEnc.actualTableLog)
	wr.close()
	b.output = wr.out

	// Maybe even add a bigger margin.
	if len(b.output)-3-bhOffset >= b.size {
		// Discard and encode as raw block.
		b.output = b.encodeRawTo(b.output[:bhOffset], org)
		b.popOffsets()
		b.litEnc.Reuse = huff0.ReusePolicyNone
		return nil
	}

	// Size is output minus block header.
	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
	if debugEncoder {
		println("Rewriting block header", bh)
	}
	_ = bh.appendTo(b.output[bhOffset:bhOffset])
	b.coders.setPrev(llEnc, mlEnc, ofEnc)
	return nil
}

var errIncompressible = errors.New("incompressible")

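// genCodes assigns the ll/of/ml symbol codes for every sequence and rebuilds
// the histograms consumed by the FSE encoders.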
func (b *blockEnc) genCodes() {
	if len(b.sequences) == 0 {
		// nothing to do
		return
	}
	if len(b.sequences) > math.MaxUint16 {
		panic("can only encode up to 64K sequences")
	}
	// No bounds checks after here:
	llH := b.coders.llEnc.Histogram()
	ofH := b.coders.ofEnc.Histogram()
	mlH := b.coders.mlEnc.Histogram()
	for i := range llH {
		llH[i] = 0
	}
	for i := range ofH {
		ofH[i] = 0
	}
	for i := range mlH {
		mlH[i] = 0
	}

	var llMax, ofMax, mlMax uint8
	for i := range b.sequences {
		seq := &b.sequences[i]
		v := llCode(seq.litLen)
		seq.llCode = v
		llH[v]++
		if v > llMax {
			llMax = v
		}

		v = ofCode(seq.offset)
		seq.ofCode = v
		ofH[v]++
		if v > ofMax {
			ofMax = v
		}

		v = mlCode(seq.matchLen)
		seq.mlCode = v
		mlH[v]++
		if v > mlMax {
			mlMax = v
			if debugAsserts && mlMax > maxMatchLengthSymbol {
				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
			}
		}
	}
	if debugAsserts && mlMax > maxMatchLengthSymbol {
		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
	}
	if debugAsserts && ofMax > maxOffsetBits {
		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
	}
	if debugAsserts && llMax > maxLiteralLengthSymbol {
		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
	}

	b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
	b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
	b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
}