// Package blake3 implements the BLAKE3 cryptographic hash function.
package blake3 // import "lukechampine.com/blake3"

import (
	"bytes"
	"encoding/binary"
	"errors"
	"hash"
	"io"
	"math"
	"math/bits"
	"runtime"
	"sync"

	"lukechampine.com/blake3/bao"
	"lukechampine.com/blake3/guts"
)

// Hasher implements hash.Hash.
type Hasher struct {
	key   [8]uint32
	flags uint32
	size  int // output size, for Sum

	// log(n) set of Merkle subtree roots, at most one per height.
	stack   [64][8]uint32
	counter uint64 // number of buffers hashed; also serves as a bit vector indicating which stack elems are occupied
	buf     [guts.ChunkSize]byte
	buflen  int
}

func (h *Hasher) hasSubtreeAtHeight(i int) bool {
	return h.counter&(1<<i) != 0
}

func (h *Hasher) pushSubtree(cv [8]uint32, height int) {
	// seek to first open stack slot, merging subtrees as we go
	i := height
	for h.hasSubtreeAtHeight(i) {
		cv = guts.ChainingValue(guts.ParentNode(h.stack[i], cv, &h.key, h.flags))
		i++
	}
	h.stack[i] = cv
	h.counter += 1 << height
}

// rootNode computes the root of the Merkle tree. It does not modify the
// stack.
func (h *Hasher) rootNode() guts.Node {
	n := guts.CompressChunk(h.buf[:h.buflen], &h.key, h.counter, h.flags)
	for i := bits.TrailingZeros64(h.counter); i < bits.Len64(h.counter); i++ {
		if h.hasSubtreeAtHeight(i) {
			n = guts.ParentNode(h.stack[i], guts.ChainingValue(n), &h.key, h.flags)
		}
	}
	n.Flags |= guts.FlagRoot
	return n
}

// Write implements hash.Hash.
func (h *Hasher) Write(p []byte) (int, error) {
	lenp := len(p)

	// align to chunk boundary
	if h.buflen > 0 {
		n := copy(h.buf[h.buflen:], p)
		h.buflen += n
		p = p[n:]
	}
	if h.buflen == len(h.buf) && len(p) > 0 {
		n := guts.CompressChunk(h.buf[:], &h.key, h.counter, h.flags)
		h.pushSubtree(guts.ChainingValue(n), 0)
		h.buflen = 0
	}

	// process full chunks
	if len(p) > len(h.buf) {
		rem := len(p) % len(h.buf)
		if rem == 0 {
			rem = len(h.buf) // don't prematurely compress
		}
		buf := bytes.NewBuffer(p[:len(p)-rem])
		eigentrees := guts.Eigentrees(h.counter, uint64(buf.Len()/guts.ChunkSize))
		cvs := make([][8]uint32, len(eigentrees))
		counter := h.counter
		var wg sync.WaitGroup
		for i, height := range eigentrees {
			wg.Add(1)
			go func(i int, buf []byte, counter uint64) {
				defer wg.Done()
				cvs[i] = guts.ChainingValue(guts.CompressEigentree(buf, &h.key, counter, h.flags))
			}(i, buf.Next((1<<height)*guts.ChunkSize), counter)
			counter += 1 << height
		}
		wg.Wait()
		for i, height := range eigentrees {
			h.pushSubtree(cvs[i], height)
		}
		p = p[len(p)-rem:]
	}

	// buffer remaining partial chunk
	n := copy(h.buf[h.buflen:], p)
	h.buflen += n
	return lenp, nil
}

// Sum implements hash.Hash.
func (h *Hasher) Sum(b []byte) (sum []byte) {
	// We need to append h.Size() bytes to b. Reuse b's capacity if possible;
	// otherwise, allocate a new slice.
	if total := len(b) + h.Size(); cap(b) >= total {
		sum = b[:total]
	} else {
		sum = make([]byte, total)
		copy(sum, b)
	}
	// Read into the appended portion of sum. Use a low-latency-low-throughput
	// path for small digests (requiring a single compression), and a
	// high-latency-high-throughput path for large digests.
	if dst := sum[len(b):]; len(dst) <= 64 {
		out := guts.WordsToBytes(guts.CompressNode(h.rootNode()))
		copy(dst, out[:])
	} else {
		h.XOF().Read(dst)
	}
	return
}

// Reset implements hash.Hash.
func (h *Hasher) Reset() {
	h.counter = 0
	h.buflen = 0
}

// BlockSize implements hash.Hash.
func (h *Hasher) BlockSize() int { return 64 }

// Size implements hash.Hash.
func (h *Hasher) Size() int { return h.size }

// XOF returns an OutputReader initialized with the current hash state.
func (h *Hasher) XOF() *OutputReader {
	return &OutputReader{
		n: h.rootNode(),
	}
}

func newHasher(key [8]uint32, flags uint32, size int) *Hasher {
	return &Hasher{
		key:   key,
		flags: flags,
		size:  size,
	}
}

// New returns a Hasher for the specified digest size and key. If key is nil,
// the hash is unkeyed. Otherwise, len(key) must be 32.
func New(size int, key []byte) *Hasher {
	if key == nil {
		return newHasher(guts.IV, 0, size)
	}
	var keyWords [8]uint32
	for i := range keyWords {
		keyWords[i] = binary.LittleEndian.Uint32(key[i*4:])
	}
	return newHasher(keyWords, guts.FlagKeyedHash, size)
}
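// The helper below is an illustrative sketch, not part of the original file:
// it shows typical Hasher usage via New, Write, and Sum. The name
// exampleHashUsage and its parameters are hypothetical, added only to
// demonstrate the API.
func exampleHashUsage(msg, key []byte, size int) []byte {
	h := New(size, key) // key == nil selects the unkeyed hash; otherwise len(key) must be 32
	h.Write(msg)        // Write never fails, so the error can be ignored
	return h.Sum(nil)   // Sum appends h.Size() bytes to its argument
}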
// Sum256 and Sum512 always use the same hasher state, so we can save some time
// when hashing small inputs by constructing the hasher ahead of time.
var defaultHasher = New(64, nil)

// Sum256 returns the unkeyed BLAKE3 hash of b, truncated to 256 bits.
func Sum256(b []byte) (out [32]byte) {
	out512 := Sum512(b)
	copy(out[:], out512[:])
	return
}

// Sum512 returns the unkeyed BLAKE3 hash of b, truncated to 512 bits.
func Sum512(b []byte) (out [64]byte) {
	var n guts.Node
	if len(b) <= guts.BlockSize {
		var block [64]byte
		copy(block[:], b)
		return guts.WordsToBytes(guts.CompressNode(guts.Node{
			CV:       guts.IV,
			Block:    guts.BytesToWords(block),
			BlockLen: uint32(len(b)),
			Flags:    guts.FlagChunkStart | guts.FlagChunkEnd | guts.FlagRoot,
		}))
	} else if len(b) <= guts.ChunkSize {
		n = guts.CompressChunk(b, &guts.IV, 0, 0)
		n.Flags |= guts.FlagRoot
	} else {
		h := *defaultHasher
		h.Write(b)
		n = h.rootNode()
	}
	return guts.WordsToBytes(guts.CompressNode(n))
}

// DeriveKey derives a subkey from ctx and srcKey. ctx should be hardcoded,
// globally unique, and application-specific. A good format for ctx strings is:
//
//	[application] [commit timestamp] [purpose]
//
// e.g.:
//
//	example.com 2019-12-25 16:18:03 session tokens v1
//
// The purpose of these requirements is to ensure that an attacker cannot trick
// two different applications into using the same context string.
func DeriveKey(subKey []byte, ctx string, srcKey []byte) {
	// construct the derivation Hasher
	const derivationIVLen = 32
	h := newHasher(guts.IV, guts.FlagDeriveKeyContext, 32)
	h.Write([]byte(ctx))
	derivationIV := h.Sum(make([]byte, 0, derivationIVLen))
	var ivWords [8]uint32
	for i := range ivWords {
		ivWords[i] = binary.LittleEndian.Uint32(derivationIV[i*4:])
	}
	h = newHasher(ivWords, guts.FlagDeriveKeyMaterial, 0)
	// derive the subKey
	h.Write(srcKey)
	h.XOF().Read(subKey)
}

// An OutputReader produces a seekable stream of 2^64 - 1 pseudorandom output
// bytes.
type OutputReader struct {
	n   guts.Node
	buf [guts.MaxSIMD * guts.BlockSize]byte
	off uint64
}

// Read implements io.Reader. Callers may assume that Read returns len(p), nil
// unless the read would extend beyond the end of the stream.
func (or *OutputReader) Read(p []byte) (int, error) {
	if or.off == math.MaxUint64 {
		return 0, io.EOF
	} else if rem := math.MaxUint64 - or.off; uint64(len(p)) > rem {
		p = p[:rem]
	}
	lenp := len(p)

	// drain existing buffer
	const bufSize = guts.MaxSIMD * guts.BlockSize
	if or.off%bufSize != 0 {
		n := copy(p, or.buf[or.off%bufSize:])
		p = p[n:]
		or.off += uint64(n)
	}

	for len(p) > 0 {
		or.n.Counter = or.off / guts.BlockSize
		if numBufs := len(p) / len(or.buf); numBufs < 1 {
			guts.CompressBlocks(&or.buf, or.n)
			n := copy(p, or.buf[or.off%bufSize:])
			p = p[n:]
			or.off += uint64(n)
		} else if numBufs == 1 {
			guts.CompressBlocks((*[bufSize]byte)(p), or.n)
			p = p[bufSize:]
			or.off += bufSize
		} else {
			// parallelize
			numCPU := min(numBufs, runtime.NumCPU())
			bufsPerCPU := uint64(numBufs / numCPU)
			var wg sync.WaitGroup
			for range numCPU {
				wg.Add(1)
				go func(p []byte, n guts.Node) {
					defer wg.Done()
					for i := range bufsPerCPU {
						guts.CompressBlocks((*[bufSize]byte)(p[i*bufSize:]), n)
						n.Counter += bufSize / guts.BlockSize
					}
				}(p, or.n)
				p = p[bufsPerCPU*bufSize:]
				or.off += bufsPerCPU * bufSize
				or.n.Counter = or.off / guts.BlockSize
			}
			wg.Wait()
		}
	}
	return lenp, nil
}

// Seek implements io.Seeker.
func (or *OutputReader) Seek(offset int64, whence int) (int64, error) {
	off := or.off
	switch whence {
	case io.SeekStart:
		if offset < 0 {
			return 0, errors.New("seek position cannot be negative")
		}
		off = uint64(offset)
	case io.SeekCurrent:
		if offset < 0 {
			if uint64(-offset) > off {
				return 0, errors.New("seek position cannot be negative")
			}
			off -= uint64(-offset)
		} else {
			off += uint64(offset)
		}
	case io.SeekEnd:
		off = uint64(offset) - 1
	default:
		panic("invalid whence")
	}
	or.off = off
	or.n.Counter = off / guts.BlockSize
	if or.off%(guts.MaxSIMD*guts.BlockSize) != 0 {
		guts.CompressBlocks(&or.buf, or.n)
	}
	// NOTE: or.off >= 2^63 will result in a negative return value.
	// Nothing we can do about this.
	return int64(or.off), nil
}

// ensure that Hasher implements hash.Hash
var _ hash.Hash = (*Hasher)(nil)
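// The helpers below are illustrative sketches, not part of the original file.
// exampleXOFRead is hypothetical: it shows how the OutputReader returned by
// XOF can be combined with Seek to read n bytes of extended output starting
// at an arbitrary offset. exampleDeriveKey is likewise hypothetical and reuses
// the context-string format documented on DeriveKey above.
func exampleXOFRead(h *Hasher, off int64, n int) []byte {
	out := make([]byte, n)
	xof := h.XOF()
	xof.Seek(off, io.SeekStart) // position the output stream at off
	xof.Read(out)               // fills out unless the read would pass the end of the stream
	return out
}

func exampleDeriveKey(srcKey []byte) []byte {
	subKey := make([]byte, 32) // DeriveKey fills however many bytes the caller provides
	DeriveKey(subKey, "example.com 2019-12-25 16:18:03 session tokens v1", srcKey)
	return subKey
}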
// BaoEncodedSize returns the size of a Bao encoding for the provided quantity
// of data.
//
// Deprecated: Use bao.EncodedSize instead.
func BaoEncodedSize(dataLen int, outboard bool) int {
	return bao.EncodedSize(dataLen, 0, outboard)
}

// BaoEncode computes the intermediate BLAKE3 tree hashes of data and writes
// them to dst.
//
// Deprecated: Use bao.Encode instead.
func BaoEncode(dst io.WriterAt, data io.Reader, dataLen int64, outboard bool) ([32]byte, error) {
	return bao.Encode(dst, data, dataLen, 0, outboard)
}

// BaoDecode reads content and tree data from the provided reader(s), and
// streams the verified content to dst.
//
// Deprecated: Use bao.Decode instead.
func BaoDecode(dst io.Writer, data, outboard io.Reader, root [32]byte) (bool, error) {
	return bao.Decode(dst, data, outboard, 0, root)
}

// BaoEncodeBuf returns the Bao encoding and root (i.e. BLAKE3 hash) for data.
//
// Deprecated: Use bao.EncodeBuf instead.
func BaoEncodeBuf(data []byte, outboard bool) ([]byte, [32]byte) {
	return bao.EncodeBuf(data, 0, outboard)
}

// BaoVerifyBuf verifies the Bao encoding and root (i.e. BLAKE3 hash) for data.
//
// Deprecated: Use bao.VerifyBuf instead.
func BaoVerifyBuf(data, outboard []byte, root [32]byte) bool {
	return bao.VerifyBuf(data, outboard, 0, root)
}
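// The helper below is an illustrative sketch, not part of the original file:
// a Bao encode/verify round trip using the deprecated in-package wrappers
// (new code should use the bao subpackage directly). The name
// exampleBaoRoundTrip is hypothetical, and it assumes that a non-outboard
// encoding is verified by passing nil for the outboard data.
func exampleBaoRoundTrip(data []byte) bool {
	encoded, root := BaoEncodeBuf(data, false) // interleaved (non-outboard) encoding
	return BaoVerifyBuf(encoded, nil, root)
}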