package blake3
import (
"bytes"
"encoding/binary"
"errors"
"hash"
"io"
"math"
"math/bits"
"runtime"
"sync"
"lukechampine.com/blake3/bao"
"lukechampine.com/blake3/guts"
)
// A Hasher computes a BLAKE3 hash incrementally. It implements hash.Hash
// (see the assertion at the bottom of this file). Input is processed one
// chunk at a time; completed subtrees are merged into a stack of chaining
// values, indexed by subtree height.
type Hasher struct {
	key     [8]uint32            // IV, keyed-hash key, or derived context key
	flags   uint32               // domain flags (keyed hash / key derivation)
	size    int                  // digest size in bytes, reported by Size and used by Sum
	stack   [64][8]uint32        // CV stack: stack[i] holds the CV of a subtree of 2^i chunks
	counter uint64               // number of chunks fully processed so far; its bits mark occupied stack slots
	buf     [guts.ChunkSize]byte // holds the current (possibly partial) chunk
	buflen  int                  // number of valid bytes in buf
}
// hasSubtreeAtHeight reports whether the CV stack currently holds a
// completed subtree of 2^height chunks. The bits of the chunk counter
// directly encode which stack slots are occupied.
func (h *Hasher) hasSubtreeAtHeight(height int) bool {
	return (h.counter>>height)&1 == 1
}
// pushSubtree adds the CV of a completed subtree of 2^height chunks to the
// stack, first merging it with any existing subtrees of equal size. This is
// carry propagation: incrementing the counter by 2^height mirrors the merges.
func (h *Hasher) pushSubtree(cv [8]uint32, height int) {
	slot := height
	for ; h.hasSubtreeAtHeight(slot); slot++ {
		cv = guts.ChainingValue(guts.ParentNode(h.stack[slot], cv, &h.key, h.flags))
	}
	h.stack[slot] = cv
	h.counter += 1 << height
}
// rootNode combines the buffered (final) chunk with every occupied CV stack
// slot, from lowest to highest, producing the root node of the hash tree.
// It does not modify h, so more data may be written afterward.
func (h *Hasher) rootNode() guts.Node {
	n := guts.CompressChunk(h.buf[:h.buflen], &h.key, h.counter, h.flags)
	for height := bits.TrailingZeros64(h.counter); height < bits.Len64(h.counter); height++ {
		if !h.hasSubtreeAtHeight(height) {
			continue
		}
		n = guts.ParentNode(h.stack[height], guts.ChainingValue(n), &h.key, h.flags)
	}
	n.Flags |= guts.FlagRoot
	return n
}
// Write implements hash.Hash. It never returns an error.
func (h *Hasher) Write(p []byte) (int, error) {
	lenp := len(p)
	// Top off the partially-filled chunk buffer, if any.
	if h.buflen > 0 {
		n := copy(h.buf[h.buflen:], p)
		h.buflen += n
		p = p[n:]
	}
	// If the buffer is full and more input remains, the buffered chunk is
	// definitely not the final chunk, so compress it and push its CV.
	if h.buflen == len(h.buf) && len(p) > 0 {
		n := guts.CompressChunk(h.buf[:], &h.key, h.counter, h.flags)
		h.pushSubtree(guts.ChainingValue(n), 0)
		h.buflen = 0
	}
	if len(p) > len(h.buf) {
		// Compress full chunks in parallel, always withholding at least one
		// trailing chunk (rem bytes) so that rootNode has a final chunk to
		// work with.
		rem := len(p) % len(h.buf)
		if rem == 0 {
			rem = len(h.buf)
		}
		eigenbuf := bytes.NewBuffer(p[:len(p)-rem])
		// Split the input into "eigentrees": maximal power-of-two subtrees
		// aligned to the current chunk counter, each compressible independently.
		trees := guts.Eigentrees(h.counter, uint64(eigenbuf.Len()/guts.ChunkSize))
		cvs := make([][8]uint32, len(trees))
		counter := h.counter
		var wg sync.WaitGroup
		for i, height := range trees {
			wg.Add(1)
			// Each goroutine gets its own input slice and starting counter;
			// cvs[i] is written by exactly one goroutine, so no lock is needed.
			go func(i int, buf []byte, counter uint64) {
				defer wg.Done()
				cvs[i] = guts.ChainingValue(guts.CompressEigentree(buf, &h.key, counter, h.flags))
			}(i, eigenbuf.Next((1<<height)*guts.ChunkSize), counter)
			counter += 1 << height
		}
		wg.Wait()
		// Push results in order; pushSubtree mutates h and is not safe to
		// call from the goroutines above.
		for i, height := range trees {
			h.pushSubtree(cvs[i], height)
		}
		p = p[len(p)-rem:]
	}
	// Buffer the remainder (guaranteed to fit: at most one chunk remains).
	n := copy(h.buf[h.buflen:], p)
	h.buflen += n
	return lenp, nil
}
// Sum implements hash.Hash, appending the digest of all written data to b.
// The hash state is not modified, so writing may continue afterward.
func (h *Hasher) Sum(b []byte) []byte {
	total := len(b) + h.Size()
	var sum []byte
	if cap(b) >= total {
		sum = b[:total]
	} else {
		sum = make([]byte, total)
		copy(sum, b)
	}
	dst := sum[len(b):]
	if len(dst) > 64 {
		// Digests longer than one output block require the XOF.
		h.XOF().Read(dst)
	} else {
		// Fast path: a single compression of the root node suffices.
		out := guts.WordsToBytes(guts.CompressNode(h.rootNode()))
		copy(dst, out[:])
	}
	return sum
}
// Reset implements hash.Hash, restoring the Hasher to its initial state.
// The CV stack need not be cleared: with the counter at zero, no stack
// slot is considered occupied (see hasSubtreeAtHeight).
func (h *Hasher) Reset() {
	h.buflen = 0
	h.counter = 0
}
// BlockSize implements hash.Hash; BLAKE3 operates on 64-byte blocks.
func (h *Hasher) BlockSize() int { return 64 }
// Size implements hash.Hash, returning the digest length in bytes that was
// specified when the Hasher was constructed.
func (h *Hasher) Size() int { return h.size }
// XOF returns an OutputReader initialized with the current hash state,
// from which an arbitrary amount of output may be read. Like Sum, it does
// not modify the Hasher.
func (h *Hasher) XOF() *OutputReader {
	return &OutputReader{n: h.rootNode()}
}
// newHasher constructs a Hasher from key material, domain flags, and the
// desired digest size in bytes.
func newHasher(key [8]uint32, flags uint32, size int) *Hasher {
	h := new(Hasher)
	h.key = key
	h.flags = flags
	h.size = size
	return h
}
// New returns a Hasher producing a size-byte digest. If key is non-nil, it
// enables keyed hashing; the first 32 bytes of key are used (key must be at
// least 32 bytes long).
func New(size int, key []byte) *Hasher {
	if key == nil {
		return newHasher(guts.IV, 0, size)
	}
	var kw [8]uint32
	for i := 0; i < len(kw); i++ {
		kw[i] = binary.LittleEndian.Uint32(key[4*i:])
	}
	return newHasher(kw, guts.FlagKeyedHash, size)
}
// defaultHasher is an unkeyed 64-byte Hasher, copied by value in Sum512 to
// avoid reinitialization on every call.
var defaultHasher = New(64, nil)
// Sum256 returns the unkeyed 32-byte BLAKE3 digest of b. It is simply the
// first half of the 64-byte digest.
func Sum256(b []byte) [32]byte {
	var digest [32]byte
	full := Sum512(b)
	copy(digest[:], full[:])
	return digest
}
// Sum512 returns the unkeyed 64-byte BLAKE3 digest of b, choosing the
// cheapest strategy for the input size.
func Sum512(b []byte) (out [64]byte) {
	var n guts.Node
	switch {
	case len(b) <= guts.BlockSize:
		// At most one block: build the root node directly.
		var block [64]byte
		copy(block[:], b)
		n = guts.Node{
			CV:       guts.IV,
			Block:    guts.BytesToWords(block),
			BlockLen: uint32(len(b)),
			Flags:    guts.FlagChunkStart | guts.FlagChunkEnd | guts.FlagRoot,
		}
	case len(b) <= guts.ChunkSize:
		// At most one chunk: compress it and mark the result as the root.
		n = guts.CompressChunk(b, &guts.IV, 0, 0)
		n.Flags |= guts.FlagRoot
	default:
		// Multiple chunks: run the full tree hash on a copy of the
		// default unkeyed Hasher.
		h := *defaultHasher
		h.Write(b)
		n = h.rootNode()
	}
	return guts.WordsToBytes(guts.CompressNode(n))
}
// DeriveKey derives a subkey from srcKey and the context string ctx,
// filling subKey with key material. (Conventionally, ctx should be a
// hardcoded, application-specific identifier.)
func DeriveKey(subKey []byte, ctx string, srcKey []byte) {
	const derivationIVLen = 32
	// Stage 1: hash the context string into a 32-byte derivation IV.
	ctxHasher := newHasher(guts.IV, guts.FlagDeriveKeyContext, 32)
	ctxHasher.Write([]byte(ctx))
	derivationIV := ctxHasher.Sum(make([]byte, 0, derivationIVLen))
	// Stage 2: hash srcKey keyed with that IV, then read out key material.
	var ivWords [8]uint32
	for i := range ivWords {
		ivWords[i] = binary.LittleEndian.Uint32(derivationIV[4*i:])
	}
	keyHasher := newHasher(ivWords, guts.FlagDeriveKeyMaterial, 0)
	keyHasher.Write(srcKey)
	keyHasher.XOF().Read(subKey)
}
// An OutputReader produces a stream of BLAKE3 output from a finalized root
// node. It implements io.Reader and io.Seeker; the stream is capped at
// 2^64 - 1 bytes, after which Read reports io.EOF.
type OutputReader struct {
	n   guts.Node                           // root node; n.Counter selects which output block to generate
	buf [guts.MaxSIMD * guts.BlockSize]byte // most recently generated batch of output blocks
	off uint64                              // absolute offset into the output stream
}
// Read implements io.Reader. Output is capped at 2^64 - 1 bytes; once that
// offset is reached, Read returns 0, io.EOF.
func (or *OutputReader) Read(p []byte) (int, error) {
	if or.off == math.MaxUint64 {
		return 0, io.EOF
	} else if rem := math.MaxUint64 - or.off; uint64(len(p)) > rem {
		// Truncate the read so that or.off never wraps past MaxUint64.
		p = p[:rem]
	}
	lenp := len(p)
	const bufsize = guts.MaxSIMD * guts.BlockSize
	// Serve any leftover bytes from the previously generated batch; after
	// this, or.off is bufsize-aligned.
	if or.off%bufsize != 0 {
		n := copy(p, or.buf[or.off%bufsize:])
		p = p[n:]
		or.off += uint64(n)
	}
	for len(p) > 0 {
		or.n.Counter = or.off / guts.BlockSize
		if numBufs := len(p) / len(or.buf); numBufs < 1 {
			// Less than one full batch remains: generate into or.buf and
			// copy out the prefix that fits, keeping the rest for later.
			guts.CompressBlocks(&or.buf, or.n)
			n := copy(p, or.buf[or.off%bufsize:])
			p = p[n:]
			or.off += uint64(n)
		} else if numBufs == 1 {
			// Exactly one batch: compress directly into p, skipping the copy.
			guts.CompressBlocks((*[bufsize]byte)(p), or.n)
			p = p[bufsize:]
			or.off += bufsize
		} else {
			// Many batches: parallelize across CPUs. Each worker compresses
			// 'per' consecutive batches into its own disjoint region of p.
			par := min(numBufs, runtime.NumCPU())
			per := uint64(numBufs / par)
			var wg sync.WaitGroup
			for range par {
				wg.Add(1)
				// The worker receives its slice and a copy of the node, so
				// advancing p / or.n below does not race with it.
				go func(p []byte, n guts.Node) {
					defer wg.Done()
					for i := range per {
						guts.CompressBlocks((*[bufsize]byte)(p[i*bufsize:]), n)
						n.Counter += bufsize / guts.BlockSize
					}
				}(p, or.n)
				p = p[per*bufsize:]
				or.off += per * bufsize
				or.n.Counter = or.off / guts.BlockSize
			}
			wg.Wait()
		}
	}
	return lenp, nil
}
// Seek implements io.Seeker. The output stream is 2^64 - 1 bytes long, so
// io.SeekEnd offsets are relative to that endpoint; offset 0 from the end
// yields position MaxUint64, where Read reports io.EOF.
func (or *OutputReader) Seek(offset int64, whence int) (int64, error) {
	off := or.off
	switch whence {
	case io.SeekStart:
		if offset < 0 {
			return 0, errors.New("seek position cannot be negative")
		}
		off = uint64(offset)
	case io.SeekCurrent:
		if offset < 0 {
			if uint64(-offset) > off {
				return 0, errors.New("seek position cannot be negative")
			}
			off -= uint64(-offset)
		} else {
			off += uint64(offset)
		}
	case io.SeekEnd:
		// Wrapping arithmetic: uint64(offset) - 1 maps offset 0 to
		// MaxUint64 (EOF) and negative offsets to positions before it.
		off = uint64(offset) - 1
	default:
		panic("invalid whence")
	}
	or.off = off
	or.n.Counter = uint64(off) / guts.BlockSize
	// If the new offset lands mid-batch, regenerate the batch so that the
	// next Read can serve the partial tail directly from or.buf.
	if or.off%(guts.MaxSIMD*guts.BlockSize) != 0 {
		guts.CompressBlocks(&or.buf, or.n)
	}
	return int64(or.off), nil
}
// compile-time assertion that Hasher satisfies hash.Hash
var _ hash.Hash = (*Hasher)(nil)
// BaoEncodedSize returns the size of a Bao encoding for the provided
// quantity of data, forwarding to bao.EncodedSize with a group parameter
// of 0.
func BaoEncodedSize(dataLen int, outboard bool) int {
	return bao.EncodedSize(dataLen, 0, outboard)
}
// BaoEncode computes the intermediate BLAKE3 tree hashes of data and writes
// them to dst, forwarding to bao.Encode with a group parameter of 0. It
// returns the root hash of the data.
func BaoEncode(dst io.WriterAt, data io.Reader, dataLen int64, outboard bool) ([32]byte, error) {
	return bao.Encode(dst, data, dataLen, 0, outboard)
}
// BaoDecode reads Bao-encoded data, streaming verified content to dst and
// reporting whether it matches the expected root hash. It forwards to
// bao.Decode with a group parameter of 0.
func BaoDecode(dst io.Writer, data, outboard io.Reader, root [32]byte) (bool, error) {
	return bao.Decode(dst, data, outboard, 0, root)
}
// BaoEncodeBuf is the in-memory counterpart of BaoEncode, returning the Bao
// encoding and root hash of data. It forwards to bao.EncodeBuf with a group
// parameter of 0.
func BaoEncodeBuf(data []byte, outboard bool) ([]byte, [32]byte) {
	return bao.EncodeBuf(data, 0, outboard)
}
// BaoVerifyBuf is the in-memory counterpart of BaoDecode, reporting whether
// the Bao-encoded data matches the expected root hash. It forwards to
// bao.VerifyBuf with a group parameter of 0.
func BaoVerifyBuf(data, outboard []byte, root [32]byte) bool {
	return bao.VerifyBuf(data, outboard, 0, root)
}
These pages were generated with Golds v0.8.2 (GOOS=linux, GOARCH=amd64).
Golds is a Go 101 project developed by Tapir Liu.
PRs and bug reports are welcome and may be submitted to the issue list.
Please follow @zigo_101 (reachable via the QR code on the left) for the latest Golds news.