package xxh3

import "math/bits"

// Hash returns the hash of the byte slice.
func ( []byte) uint64 {
	return hashAny(*(*str)(ptr(&)))
}

// Hash returns the hash of the string slice.
func ( string) uint64 {
	return hashAny(*(*str)(ptr(&)))
}

func hashAny( str) ( u64) {
	,  := .p, .l

	switch {
	case  <= 16:
		switch {
		case  > 8: // 9-16
			 := readU64(, 0) ^ (key64_024 ^ key64_032)
			 := readU64(, ui()-8) ^ (key64_040 ^ key64_048)
			 := mulFold64(, )
			return xxh3Avalanche(u64() + bits.ReverseBytes64() +  + )

		case  > 3: // 4-8
			 := readU32(, 0)
			 := readU32(, ui()-4)
			 := u64() + u64()<<32
			 :=  ^ (key64_008 ^ key64_016)
			return rrmxmx(, u64())

		case  == 3: // 3
			 := u64(readU16(, 0))
			 := u64(readU8(, 2))
			 = <<16 +  + 3<<8

		case  > 1: // 2
			 := u64(readU16(, 0))
			 = *(1<<24+1)>>8 + 2<<8

		case  == 1: // 1
			 := u64(readU8(, 0))
			 = *(1<<24+1<<16+1) + 1<<8

		default: // 0
			return 0x2d06800538d394c2 // xxh_avalanche(key64_056 ^ key64_064)
		}

		 ^= u64(key32_000 ^ key32_004)
		return xxhAvalancheSmall()

	case  <= 128:
		 = u64() * prime64_1

		if  > 32 {
			if  > 64 {
				if  > 96 {
					 += mulFold64(readU64(, 6*8)^key64_096, readU64(, 7*8)^key64_104)
					 += mulFold64(readU64(, ui()-8*8)^key64_112, readU64(, ui()-7*8)^key64_120)
				} // 96
				 += mulFold64(readU64(, 4*8)^key64_064, readU64(, 5*8)^key64_072)
				 += mulFold64(readU64(, ui()-6*8)^key64_080, readU64(, ui()-5*8)^key64_088)
			} // 64
			 += mulFold64(readU64(, 2*8)^key64_032, readU64(, 3*8)^key64_040)
			 += mulFold64(readU64(, ui()-4*8)^key64_048, readU64(, ui()-3*8)^key64_056)
		} // 32
		 += mulFold64(readU64(, 0*8)^key64_000, readU64(, 1*8)^key64_008)
		 += mulFold64(readU64(, ui()-2*8)^key64_016, readU64(, ui()-1*8)^key64_024)

		return xxh3Avalanche()

	case  <= 240:
		 = u64() * prime64_1

		 += mulFold64(readU64(, 0*16+0)^key64_000, readU64(, 0*16+8)^key64_008)
		 += mulFold64(readU64(, 1*16+0)^key64_016, readU64(, 1*16+8)^key64_024)
		 += mulFold64(readU64(, 2*16+0)^key64_032, readU64(, 2*16+8)^key64_040)
		 += mulFold64(readU64(, 3*16+0)^key64_048, readU64(, 3*16+8)^key64_056)
		 += mulFold64(readU64(, 4*16+0)^key64_064, readU64(, 4*16+8)^key64_072)
		 += mulFold64(readU64(, 5*16+0)^key64_080, readU64(, 5*16+8)^key64_088)
		 += mulFold64(readU64(, 6*16+0)^key64_096, readU64(, 6*16+8)^key64_104)
		 += mulFold64(readU64(, 7*16+0)^key64_112, readU64(, 7*16+8)^key64_120)

		// avalanche
		 = xxh3Avalanche()

		// trailing groups after 128
		 := ui() &^ 15
		for  := ui(8 * 16);  < ;  += 16 {
			 += mulFold64(readU64(, +0)^readU64(key, -125), readU64(, +8)^readU64(key, -117))
		}

		// last 16 bytes
		 += mulFold64(readU64(, ui()-16)^key64_119, readU64(, ui()-8)^key64_127)

		return xxh3Avalanche()

	default:
		 = u64() * prime64_1

		 := [8]u64{
			prime32_3, prime64_1, prime64_2, prime64_3,
			prime64_4, prime32_2, prime64_5, prime32_1,
		}

		if hasAVX512 &&  >= avx512Switch {
			accumAVX512(&, , key, u64())
		} else if hasAVX2 {
			accumAVX2(&, , key, u64())
		} else if hasSSE2 {
			accumSSE(&, , key, u64())
		} else {
			accumScalar(&, , key, u64())
		}

		// merge accs
		 += mulFold64([0]^key64_011, [1]^key64_019)
		 += mulFold64([2]^key64_027, [3]^key64_035)
		 += mulFold64([4]^key64_043, [5]^key64_051)
		 += mulFold64([6]^key64_059, [7]^key64_067)

		return xxh3Avalanche()
	}
}