package xxh3

import "math/bits"
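
// hashAnySeed below dispatches on input length: at most 16 bytes takes the
// tiny-input paths, 17-128 bytes uses unrolled mulFold64 pairs, 129-240
// bytes adds a 16-byte-stride loop over the key, and anything larger runs
// the vectorized accumulator loop (with a derived secret when seed != 0).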

// HashSeed returns the hash of the byte slice with the given seed.
func HashSeed(b []byte, seed uint64) uint64 {
	return hashAnySeed(*(*str)(ptr(&b)), seed)
}

// HashStringSeed returns the hash of the string with the given seed.
func HashStringSeed(s string, seed uint64) uint64 {
	return hashAnySeed(*(*str)(ptr(&s)), seed)
}
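
// Usage sketch (illustrative values): HashSeed and HashStringSeed feed the
// same string header into hashAnySeed, so a byte slice and a string with
// identical contents hash identically under the same seed:
//
//	h1 := HashSeed([]byte("hello"), 42)
//	h2 := HashStringSeed("hello", 42)
//	// h1 == h2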

func hashAnySeed(s str, seed uint64) (acc u64) {
	p, l := s.p, s.l

	switch {
	case l <= 16:
		switch {
		case l > 8: // 9-16
			inputlo := readU64(p, 0) ^ (key64_024 ^ key64_032 + seed)
			inputhi := readU64(p, ui(l)-8) ^ (key64_040 ^ key64_048 - seed)
			folded := mulFold64(inputlo, inputhi)
			return xxh3Avalanche(u64(l) + bits.ReverseBytes64(inputlo) + inputhi + folded)

		case l > 3: // 4-8
			seed ^= u64(bits.ReverseBytes32(u32(seed))) << 32
			input1 := readU32(p, 0)
			input2 := readU32(p, ui(l)-4)
			input64 := u64(input2) + u64(input1)<<32
			keyed := input64 ^ (key64_008 ^ key64_016 - seed)
			return rrmxmx(keyed, u64(l))

		case l == 3: // 3
			c12 := u64(readU16(p, 0))
			c3 := u64(readU8(p, 2))
			acc = c12<<16 + c3 + 3<<8

		case l > 1: // 2
			c12 := u64(readU16(p, 0))
			acc = c12*(1<<24+1)>>8 + 2<<8

		case l == 1: // 1
			c1 := u64(readU8(p, 0))
			acc = c1*(1<<24+1<<16+1) + 1<<8

		default: // 0
			return xxhAvalancheSmall(seed ^ key64_056 ^ key64_064)
		}

		acc ^= u64(key32_000^key32_004) + seed
		return xxhAvalancheSmall(acc)

	case l <= 128:
		acc = u64(l) * prime64_1

		if l > 32 {
			if l > 64 {
				if l > 96 {
					acc += mulFold64(readU64(p, 6*8)^(key64_096+seed), readU64(p, 7*8)^(key64_104-seed))
					acc += mulFold64(readU64(p, ui(l)-8*8)^(key64_112+seed), readU64(p, ui(l)-7*8)^(key64_120-seed))
				} // 96
				acc += mulFold64(readU64(p, 4*8)^(key64_064+seed), readU64(p, 5*8)^(key64_072-seed))
				acc += mulFold64(readU64(p, ui(l)-6*8)^(key64_080+seed), readU64(p, ui(l)-5*8)^(key64_088-seed))
			} // 64
			acc += mulFold64(readU64(p, 2*8)^(key64_032+seed), readU64(p, 3*8)^(key64_040-seed))
			acc += mulFold64(readU64(p, ui(l)-4*8)^(key64_048+seed), readU64(p, ui(l)-3*8)^(key64_056-seed))
		} // 32
		acc += mulFold64(readU64(p, 0*8)^(key64_000+seed), readU64(p, 1*8)^(key64_008-seed))
		acc += mulFold64(readU64(p, ui(l)-2*8)^(key64_016+seed), readU64(p, ui(l)-1*8)^(key64_024-seed))

		return xxh3Avalanche(acc)

	case l <= 240:
		acc = u64(l) * prime64_1

		acc += mulFold64(readU64(p, 0*16+0)^(key64_000+seed), readU64(p, 0*16+8)^(key64_008-seed))
		acc += mulFold64(readU64(p, 1*16+0)^(key64_016+seed), readU64(p, 1*16+8)^(key64_024-seed))
		acc += mulFold64(readU64(p, 2*16+0)^(key64_032+seed), readU64(p, 2*16+8)^(key64_040-seed))
		acc += mulFold64(readU64(p, 3*16+0)^(key64_048+seed), readU64(p, 3*16+8)^(key64_056-seed))
		acc += mulFold64(readU64(p, 4*16+0)^(key64_064+seed), readU64(p, 4*16+8)^(key64_072-seed))
		acc += mulFold64(readU64(p, 5*16+0)^(key64_080+seed), readU64(p, 5*16+8)^(key64_088-seed))
		acc += mulFold64(readU64(p, 6*16+0)^(key64_096+seed), readU64(p, 6*16+8)^(key64_104-seed))
		acc += mulFold64(readU64(p, 7*16+0)^(key64_112+seed), readU64(p, 7*16+8)^(key64_120-seed))

		// avalanche
		acc = xxh3Avalanche(acc)

		// trailing groups after 128
		top := ui(l) &^ 15
		for i := ui(8 * 16); i < top; i += 16 {
			acc += mulFold64(readU64(p, i+0)^(readU64(key, i-125)+seed), readU64(p, i+8)^(readU64(key, i-117)-seed))
		}

		// last 16 bytes
		acc += mulFold64(readU64(p, ui(l)-16)^(key64_119+seed), readU64(p, ui(l)-8)^(key64_127-seed))

		return xxh3Avalanche(acc)

	default:
		acc = u64(l) * prime64_1

		// use the default key, or derive a fresh secret from the seed
		secret := key
		if seed != 0 {
			secret = ptr(&[secretSize]byte{})
			initSecret(secret, seed)
		}

		accs := [8]u64{
			prime32_3, prime64_1, prime64_2, prime64_3,
			prime64_4, prime32_2, prime64_5, prime32_1,
		}

		if hasAVX512 && l >= avx512Switch {
			accumAVX512(&accs, p, secret, u64(l))
		} else if hasAVX2 {
			accumAVX2(&accs, p, secret, u64(l))
		} else if hasSSE2 {
			accumSSE(&accs, p, secret, u64(l))
		} else {
			accumScalarSeed(&accs, p, secret, u64(l))
		}

		// merge accs
		acc += mulFold64(accs[0]^readU64(secret, 11), accs[1]^readU64(secret, 19))
		acc += mulFold64(accs[2]^readU64(secret, 27), accs[3]^readU64(secret, 35))
		acc += mulFold64(accs[4]^readU64(secret, 43), accs[5]^readU64(secret, 51))
		acc += mulFold64(accs[6]^readU64(secret, 59), accs[7]^readU64(secret, 67))

		return xxh3Avalanche(acc)
	}
}
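
// mulFold64Sketch is a minimal reference sketch of the 128-bit multiply-fold
// primitive used throughout this file. It is an assumption about the shape of
// the package's real mulFold64 (defined elsewhere); the name here is
// illustrative only.
func mulFold64Sketch(x, y uint64) uint64 {
	hi, lo := bits.Mul64(x, y) // full 128-bit product of x and y
	return hi ^ lo             // fold the high and low 64-bit halves together
}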