//go:build unix

package alloc

import (
	

	

	
)

func (,  uint64) experimental.LinearMemory {
	// Round up to the page size.
	 := uint64(unix.Getpagesize() - 1)
	 := ( + ) &^ 

	if  > math.MaxInt {
		// This ensures int(res) overflows to a negative value,
		// and unix.Mmap returns EINVAL.
		 = math.MaxUint64
	}

	 := 
	 := unix.PROT_READ | unix.PROT_WRITE
	if  <  { // Commit memory only if cap=max.
		 = 0
		 = unix.PROT_NONE
	}

	// Reserve res bytes of address space, to ensure we won't need to move it.
	// A protected, private, anonymous mapping should not commit memory.
	,  := unix.Mmap(-1, 0, int(), , unix.MAP_PRIVATE|unix.MAP_ANON)
	if  != nil {
		panic()
	}
	return &mmappedMemory{buf: [:]}
}

// mmappedMemory is a linear memory backed by an anonymous, private mmap.
//
// The slice covers the entire mmapped memory:
//   - len(buf) is the already committed memory,
//   - cap(buf) is the reserved address space.
type mmappedMemory struct {
	buf []byte
}

func ( *mmappedMemory) ( uint64) []byte {
	 := uint64(len(.buf))
	 := uint64(cap(.buf))
	if  <  &&  <=  {
		// Grow geometrically, round up to the page size.
		 := uint64(unix.Getpagesize() - 1)
		 :=  + >>3
		 = min(max(, ), )
		 = ( + ) &^ 

		// Commit additional memory up to new bytes.
		 := unix.Mprotect(.buf[:], unix.PROT_READ|unix.PROT_WRITE)
		if  != nil {
			return nil
		}

		.buf = .buf[:] // Update committed memory.
	}
	// Limit returned capacity because bytes beyond
	// len(m.buf) have not yet been committed.
	return .buf[::len(.buf)]
}

func ( *mmappedMemory) () {
	 := unix.Munmap(.buf[:cap(.buf)])
	if  != nil {
		panic()
	}
	.buf = nil
}