package rpc

import (
	"context"
	"errors"
	"fmt"
	"maps"
	"net"
	"os"
	"slices"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/cenkalti/rpc2"

	amhelp "github.com/pancsta/asyncmachine-go/pkg/helpers"
	am "github.com/pancsta/asyncmachine-go/pkg/machine"
	"github.com/pancsta/asyncmachine-go/pkg/rpc/states"
)

var (
	ssC  = states.ClientStates
	ssCo = states.ConsumerStates
)

// Client is a type representing an RPC client that interacts with a remote
// am.Machine instance.
type Client struct {
	*ExceptionHandler

	Mach *am.Machine
	Name string

	// Addr is the address the Client will connect to.
	Addr string
	// RequestSchema requests the state schema from the server.
	RequestSchema bool
	// Worker is a remote am.Machine instance.
	Worker *Worker
	// Consumer is the optional consumer for deliveries.
	Consumer   *am.Machine
	CallCount  uint64
	LogEnabled bool
	// DisconnCooldown is the time to wait after notifying the server about
	// disconnecting before actually disconnecting. Default 10ms.
	DisconnCooldown time.Duration
	// LastMsgAt is the time of the last msg received from the worker. TODO
	LastMsgAt time.Time
	// HelloDelay between Connected and Handshaking. Default 0, useful for
	// rpc/Mux.
	HelloDelay time.Duration
	// ReconnectOn decides if the client will try to [RetryingConn] after a
	// clean [Disconnect].
	ReconnectOn bool

	// failsafe - connection

	// ConnTimeout is the maximum time to wait for a connection to be established.
	// Default 3s.
	ConnTimeout time.Duration
	// ConnRetries is the number of retries for a connection. Default 15.
	ConnRetries int
	// ConnRetryTimeout is the maximum time to retry a connection. Default 1m.
	ConnRetryTimeout time.Duration
	// ConnRetryDelay is the time to wait between retries. Default 100ms. If
	// ConnRetryBackoff is set, this is the initial delay, and doubles on each
	// retry.
	ConnRetryDelay time.Duration
	// ConnRetryBackoff is the maximum time to wait between retries. Default 3s.
	ConnRetryBackoff time.Duration
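	// With the defaults above, the delay between connection retries grows
	// roughly as 100ms, 200ms, 400ms, 800ms, 1.6s, then stays at the 3s cap,
	// until ConnRetries or ConnRetryTimeout is exhausted.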

	// failsafe - calls

	// CallTimeout is the maximum time to wait for a call to complete. Default 3s.
	CallTimeout time.Duration
	// CallRetries is the number of retries for a call. Default 15.
	CallRetries int
	// CallRetryTimeout is the maximum time to retry a call. Default 1m.
	CallRetryTimeout time.Duration
	// CallRetryDelay is the time to wait between retries. Default 100ms. If
	// CallRetryBackoff is set, this is the initial delay, and doubles on each
	// retry.
	CallRetryDelay time.Duration
	// CallRetryBackoff is the maximum time to wait between retries. Default 3s.
	CallRetryBackoff time.Duration

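	// DisconnTimeout is the maximum time to wait for the RPC connection to
	// close during a graceful disconnect. Default 3s.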
	DisconnTimeout time.Duration

	// internal

	callLock     sync.Mutex
	rpc          *rpc2.Client
	workerStates am.S
	workerSchema am.Schema
	conn         net.Conn
	// tmpTestErr is an error to return on the next call or notify, only for
	// testing.
	tmpTestErr error
	// permTestErr is an error to return on every call or notify, only for
	// testing.
	permTestErr    error
	connRetryRound atomic.Int32
}

// interfaces
var (
	_ clientRpcMethods    = &Client{}
	_ clientServerMethods = &Client{}
)

// NewClient creates a new RPC client and exposes a remote state machine as
// a remote worker, with a subset of the am.Machine API under Client.Worker.
// Optionally takes a consumer, which is a state machine with a WorkerPayload
// state. See states.ConsumerStates.
func NewClient(
	ctx context.Context, name string, workerAddr string, stateStruct am.Schema,
	stateNames am.S, opts *ClientOpts,
) (*Client, error) {
	// validate
	if workerAddr == "" {
		return nil, errors.New("rpcc: workerAddr required")
	}
	if stateStruct == nil {
		return nil, errors.New("rpcc: stateStruct required")
	}
	if stateNames == nil {
		return nil, errors.New("rpcc: stateNames required")
	}

	if name == "" {
		name = "rpc"
	}
	if opts == nil {
		opts = &ClientOpts{}
	}

	c := &Client{
		Name:             name,
		ExceptionHandler: &ExceptionHandler{},
		LogEnabled:       os.Getenv(EnvAmRpcLogClient) != "",
		Addr:             workerAddr,
		CallTimeout:      3 * time.Second,
		ConnTimeout:      3 * time.Second,
		DisconnTimeout:   3 * time.Second,
		DisconnCooldown:  10 * time.Millisecond,
		ReconnectOn:      true,

		ConnRetryTimeout: 1 * time.Minute,
		ConnRetries:      15,
		ConnRetryDelay:   100 * time.Millisecond,
		ConnRetryBackoff: 3 * time.Second,

		CallRetryTimeout: 1 * time.Minute,
		CallRetries:      15,
		CallRetryDelay:   100 * time.Millisecond,
		CallRetryBackoff: 3 * time.Second,

		workerStates: slices.Clone(stateNames),
		workerSchema: maps.Clone(stateStruct),
	}

	if amhelp.IsDebug() {
		c.CallTimeout = 100 * time.Second
	}

	// state machine
	mach, err := am.NewCommon(ctx, GetClientId(name), states.ClientSchema,
		ssC.Names(), c, opts.Parent, &am.Opts{Tags: []string{
			"rpc-client",
			"addr:" + workerAddr,
		}})
	if err != nil {
		return nil, err
	}
	mach.SemLogger().SetArgsMapper(LogArgs)
	c.Mach = mach
	// optional env debug
	if os.Getenv(EnvAmRpcDbg) != "" {
		amhelp.MachDebugEnv(mach)
	}

	// TODO debug
	// mach.AddBreakpoint(nil, am.S{ssC.Disconnected})

	if opts.Consumer != nil {

		err := amhelp.Implements(opts.Consumer.StateNames(), ssCo.Names())
		if err != nil {
			err = fmt.Errorf(
				"consumer has to implement pkg/rpc/states/ConsumerStatesDef: %w", err)

			return nil, err
		}
		c.Consumer = opts.Consumer
	}

	return c, nil
}

// ///// ///// /////

// ///// HANDLERS

// ///// ///// /////

func (c *Client) StartState(e *am.Event) {
	ctx := c.Mach.NewStateCtx(ssC.Start)
	worker, err := NewWorker(ctx, "", c, c.workerSchema, c.workerStates, c.Mach,
		nil)
	if err != nil {
		c.Mach.AddErr(err, nil)
	}
	c.Worker = worker
	// optional env debug
	if os.Getenv(EnvAmRpcDbg) != "" {
		amhelp.MachDebugEnv(worker)
	}
}

func (c *Client) StartEnd(e *am.Event) {
	// gather state from before the transition
	before := e.Transition().TimeBefore
	idx := e.Machine().Index1

	// if never connected, stop here
	if before.Is([]int{idx(ssC.Connecting), idx(ssC.Exception)}) {
		return
	}

	// graceful disconnect
	wasConn := before.Is1(idx(ssC.Connecting)) || before.Is1(idx(ssC.Connected))
	if wasConn {
		c.Mach.EvAdd1(e, ssC.Disconnecting, nil)
	}
}

func (c *Client) ConnectingState(e *am.Event) {
	ctx := c.Mach.NewStateCtx(ssC.Connecting)

	// async
	go func() {
		if ctx.Err() != nil {
			return // expired
		}

		// net dial
		timeout := c.ConnTimeout
		if amhelp.IsDebug() {
			timeout = 100 * time.Second
		}
		// TODO TLS
		d := net.Dialer{
			Timeout: timeout,
		}
		c.Mach.Log("dialing %s", c.Addr)
		conn, err := d.DialContext(ctx, "tcp4", c.Addr)
		if ctx.Err() != nil {
			return // expired
		}
		if err != nil {
			c.Mach.EvAdd1(e, ssC.Disconnected, nil)
			AddErrNetwork(e, c.Mach, err)
			return
		}
		c.conn = conn

		// rpc
		c.bindRpcHandlers(conn)
		go c.rpc.Run()

		c.Mach.EvAdd1(e, ssC.Connected, nil)
	}()
}

func (c *Client) ConnectedEnter(e *am.Event) bool {
	return c.rpc != nil && c.conn != nil
}

func (c *Client) DisconnectingState(e *am.Event) {
	ctx := c.Mach.NewStateCtx(ssC.Disconnecting)

	go func() {
		if ctx.Err() != nil {
			return // expired
		}

		// notify the server and wait a bit
		c.notify(ctx, ServerBye.Value, &Empty{})
		if !amhelp.Wait(ctx, c.DisconnCooldown) {
			c.ensureGroupConnected(e)

			return // expired
		}

		// close with timeout
		if c.rpc != nil {
			select {
			case <-time.After(c.DisconnTimeout):
				c.log("rpc.Close timeout")
			case <-amhelp.ExecAndClose(func() {
				_ = c.rpc.Close()
			}):
				c.log("rpc.Close")
			case <-ctx.Done():
				// expired
			}
		}
		if ctx.Err() != nil {
			c.ensureGroupConnected(e)

			return // expired
		}

		c.Mach.EvAdd1(e, ssC.Disconnected, nil)
	}()
}

func (c *Client) ConnectedState(e *am.Event) {
	ctx := c.Mach.NewStateCtx(ssC.Connected)
	disconnCh := c.rpc.DisconnectNotify()
	// reset reconn counter
	c.connRetryRound.Store(0)

	go func() {
		select {

		case <-ctx.Done():
			return // expired

		case <-disconnCh:
			c.log("rpc.DisconnectNotify")
			c.Mach.EvAdd1(e, ssC.Disconnected, nil)
		}
	}()
}

func (c *Client) DisconnectedEnter(e *am.Event) bool {
	// graceful disconnect
	return !c.Mach.WillBe1(ssC.Disconnecting)
}

func (c *Client) DisconnectedState(e *am.Event) {
	// try to reconnect
	any1 := e.Transition().TimeBefore.Any1
	if any1(c.Mach.Index1(ssC.Connected), c.Mach.Index1(ssC.Connecting)) &&
		c.ReconnectOn {

		c.Mach.EvAdd1(e, ssC.RetryingConn, nil)
		return
	}

	// ignore error when disconnecting
	if c.conn != nil {
		_ = c.conn.Close()
	}
}

func (c *Client) HandshakingState(e *am.Event) {
	ctx := c.Mach.NewStateCtx(ssC.Connected)

	// unblock
	go func() {
		if ctx.Err() != nil {
			return // expired
		}

		// send hello or retry conn
		resp := &RespHandshake{}
		if c.HelloDelay > 0 {
			if !amhelp.Wait(ctx, c.HelloDelay) {
				return // expired
			}
		}

		// retry to pass cmux
		ok := false
		delay := c.CallRetryDelay
		// shorten the timeout
		timeout := c.CallTimeout / 2
		for i := 0; i < c.ConnRetries; i++ {
			// TODO pass ID and key here
			args := ArgsHello{ReqSchema: c.RequestSchema}
			if c.call(ctx, ServerHello.Value, args, resp, timeout) {
				ok = true
				c.log("hello ok on %d try", i+1)

				break
			}
			if !amhelp.Wait(ctx, delay) {
				return // expired
			}

			// double the delay when backoff set
			if c.CallRetryBackoff > 0 {
				delay *= 2
				if delay > c.CallRetryBackoff {
					delay = c.CallRetryBackoff
				}
			}
		}
		if !ok {
			c.Mach.EvAdd1(e, ssC.RetryingConn, nil)
			return
		}

		// validate
		stateNames := resp.Serialized.StateNames
		if len(stateNames) == 0 {
			AddErrRpcStr(e, c.Mach, "states missing")
			return
		}
		if resp.Serialized.ID == "" {
			AddErrRpcStr(e, c.Mach, "ID missing")
			return
		}
		if c.RequestSchema && resp.Schema == nil {
			AddErrRpcStr(e, c.Mach, "schema missing")
			return
		}

		// schema
		c.RequestSchema = false
		if resp.Schema != nil {
			c.updateSchema(resp)
		}

		// ID as tag TODO find-n-replace the tag, not via index [1]
		c.Worker.tags[1] = "src-id:" + resp.Serialized.ID
		// TODO setter
		c.Worker.remoteId = resp.Serialized.ID

		// compare states
		if !am.StatesEqual(c.workerStates, stateNames) {
			AddErrRpcStr(e, c.Mach, "States differ on client/server")
			return
		}

		// confirm the handshake or retry conn
		// TODO pass ID and key here
		if !c.call(ctx, ServerHandshake.Value, c.Mach.Id(), &Empty{}, 0) {
			c.Mach.EvAdd1(e, ssC.RetryingConn, nil)
			return
		}

		// finalize
		c.Mach.EvAdd1(e, ssC.HandshakeDone, Pass(&A{
			Id:        resp.Serialized.ID,
			MachTime:  resp.Serialized.Time,
			QueueTick: resp.Serialized.QueueTick,
		}))
	}()
}

func (c *Client) updateSchema(resp *RespHandshake) {
	// TODO move to Worker.SetSchema???
	w := c.Worker
	w.schemaMx.Lock()
	defer w.schemaMx.Unlock()
	w.clockMx.Lock()
	defer w.clockMx.Unlock()
	c.workerSchema = resp.Schema
	c.workerStates = resp.Serialized.StateNames

	// save in the worker
	w.schema = resp.Schema
	w.stateNames = resp.Serialized.StateNames
	w.queueTick = resp.Serialized.QueueTick
	w.machTime = resp.Serialized.Time
	for i, name := range w.stateNames {
		w.machClock[name] = w.machTime[i]
	}
}

func (c *Client) HandshakeDoneEnter(e *am.Event) bool {
	args := ParseArgs(e.Args)
	return args.Id != "" && args.MachTime != nil && args.QueueTick > 0
}

func (c *Client) HandshakeDoneState(e *am.Event) {
	args := ParseArgs(e.Args)

	// finalize the worker init
	w := c.Worker
	w.id = "rw-" + c.Name
	c.updateClock(nil, args.MachTime, args.QueueTick)

	c.log("connected to %s", c.Worker.id)
	c.log("time t%d q%d: %v",
		c.Worker.Time(nil).Sum(nil), c.Worker.QueueTick(), args.MachTime)
}

func (c *Client) CallRetryFailedState(e *am.Event) {
	c.Mach.EvRemove1(e, ssC.CallRetryFailed, nil)

	// TODO disconnect after N failed retries
	// TODO backoff and reconnect (retry the whole connection)
}

func (c *Client) RetryingCallEnter(e *am.Event) bool {
	return c.Mach.Any1(ssC.Connected, ssC.RetryingConn)
}

// ExceptionState handles network errors and retries the connection.
func (c *Client) ExceptionState(e *am.Event) {
	// call super
	c.ExceptionHandler.ExceptionState(e)
	c.Mach.EvRemove1(e, am.StateException, nil)
	// TODO handle am.ErrSchema:
	//  "worker has to implement pkg/rpc/states/WorkerStatesDef"
	//  only for nondeterministic machs
}

// RetryingConnState should be set without Connecting in the same tx.
func (c *Client) RetryingConnState(e *am.Event) {
	ctx := c.Mach.NewStateCtx(ssC.RetryingConn)
	delay := c.ConnRetryDelay
	startedAt := time.Now()

	// unblock
	go func() {
		// retry loop
		for ctx.Err() == nil && c.connRetryRound.Load() < int32(c.ConnRetries) {
			c.connRetryRound.Add(1)

			// wait for time or exit
			if !amhelp.Wait(ctx, delay) {
				return // expired
			}

			// try
			amhelp.Add1Block(ctx, c.Mach, ssC.Connecting, nil)
			if ctx.Err() != nil {
				return // expired
			}

			_ = amhelp.WaitForErrAny(ctx, c.ConnTimeout*2, c.Mach,
				c.Mach.WhenNot1(ssC.Connecting, ctx))
			if ctx.Err() != nil {
				return // expired
			}
			// remove err
			c.Mach.EvRemove1(e, ssC.Exception, nil)

			// double the delay when backoff set
			if c.ConnRetryBackoff > 0 {
				delay *= 2
				if delay > c.ConnRetryBackoff {
					delay = c.ConnRetryBackoff
				}
			}

			if c.ConnRetryTimeout > 0 && time.Since(startedAt) > c.ConnRetryTimeout {
				break
			}
		}

		// next
		if ctx.Err() != nil {
			return // expired
		}
		c.Mach.EvRemove1(e, ssC.RetryingConn, nil)
		c.Mach.EvAdd1(e, ssC.ConnRetryFailed, nil)
	}()
}

func (c *Client) WorkerPayloadEnter(e *am.Event) bool {
	if c.Consumer == nil {
		return false
	}
	args := ParseArgs(e.Args)
	argsOut := &A{Name: args.Name}

	if args.Payload == nil {
		err := errors.New("invalid payload")
		c.Mach.AddErrState(ssC.ErrDelivery, err, Pass(argsOut))

		return false
	}

	return true
}

func (c *Client) WorkerPayloadState(e *am.Event) {
	args := ParseArgs(e.Args)
	argsOut := &A{
		Name:    args.Name,
		Payload: args.Payload,
	}

	c.Consumer.EvAdd1(e, ssCo.WorkerPayload, Pass(argsOut))
}

func (c *Client) HealthcheckState(e *am.Event) {
	c.Mach.EvRemove1(e, ssC.Healthcheck, nil)
	c.ensureGroupConnected(e)
}

// ///// ///// /////

// ///// METHODS

// ///// ///// /////

// Start connects the client to the server and initializes the worker.
// Results in the Ready state.
func (c *Client) Start() am.Result {
	return c.Mach.Add(am.S{ssC.Start, ssC.Connecting}, nil)
}

// Stop disconnects the client from the server and disposes the worker.
//
// waitTillExit: if passed, waits for the client to disconnect using the
// context.
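//
// A usage sketch (assuming a started Client c):
//
//	// wait for the disconnect via the passed context, then dispose
//	c.Stop(context.Background(), true)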
func (c *Client) Stop(waitTillExit context.Context, dispose bool) am.Result {
	res := c.Mach.Remove1(ssC.Start, nil)
	// wait for the client to disconnect
	if res != am.Canceled && waitTillExit != nil {
		// TODO timeout config
		_ = amhelp.WaitForAll(waitTillExit, 2*time.Second,
			c.Mach.When1(ssC.Disconnected, nil))
	}

	if dispose {
		c.log("disposing")
		c.Mach.Dispose()
		c.Worker.Dispose()
	}

	return res
}

// GetKind returns the kind of this RPC component (server / client).
func (c *Client) GetKind() Kind {
	return KindClient
}

// ensureGroupConnected ensures that at least one state from GroupConnected
// is active.
func (c *Client) ensureGroupConnected(e *am.Event) {
	group := states.ClientGroups.Connected
	if !c.Mach.Any1(group...) && !c.Mach.WillBe(group) {
		c.Mach.EvAdd1(e, ssC.Disconnected, nil)
	}
}

// ///// ///// /////

// ///// INTERNAL

// ///// ///// /////

func (c *Client) log(msg string, args ...any) {
	if !c.LogEnabled {
		return
	}
	c.Mach.Log(msg, args...)
}

func (c *Client) bindRpcHandlers(conn net.Conn) {
	c.log("new rpc2 client")

	c.rpc = rpc2.NewClient(conn)
	c.rpc.Handle(ClientSetClock.Value, c.RemoteSetClock)
	c.rpc.Handle(ClientPushAllTicks.Value, c.RemotePushAllTicks)
	c.rpc.Handle(ClientSendPayload.Value, c.RemoteSendPayload)
	c.rpc.Handle(ClientBye.Value, c.RemoteBye)
	c.rpc.Handle(ClientSchemaChange.Value, c.RemoteSchemaChange)

	// wait for reply on each req
	c.rpc.SetBlocking(true)
}

func (c *Client) updateClock(
	msg *ClockMsg, machTime am.Time, queueTick uint64,
) {
	if c.Mach.Not1(ssC.HandshakeDone) {
		return
	}

	// lock the worker TODO not great
	c.Worker.clockMx.Lock()
	var mTime am.Time
	var qTick uint64

	// diff update
	if msg != nil {
		// diff clock update
		mTime, qTick = ClockFromMsg(c.Worker.machTime, c.Worker.queueTick, msg)
		checksum := Checksum(mTime.Sum(nil), qTick)

		// verify
		if checksum != msg.Checksum {
			// TODO request full update
			c.Mach.Log("updateClock mismatch %d != %d", msg.Checksum, checksum)
			c.log("msg q%d ch%d %+v", msg.QueueTick, msg.Checksum, msg.Updates)
			c.log("clock t%d q%d ch%d (%+v)", mTime.Sum(nil), qTick, checksum, mTime)
			c.Worker.clockMx.Unlock()

			return
		}

		// full update
	} else {
		mTime = machTime
		qTick = queueTick
	}

	// err
	if mTime == nil {
		c.Worker.clockMx.Unlock()
		return
	}

	var sum uint64
	for _, tick := range mTime {
		sum += tick
	}

	if msg != nil {
		c.log("updateClock diff OK t%d q%d", sum, qTick)
	} else {
		c.log("updateClock full OK t%d q%d", sum, qTick)
	}

	c.Worker.InternalUpdateClock(mTime, qTick, false)
}

func (c *Client) callFailsafe(
	ctx context.Context, method string, args, resp any,
) bool {
	methodName := ServerMethods.Parse(method).Value

	// validate
	if c.rpc == nil {
		AddErrNoConn(nil, c.Mach, errors.New(methodName))
		return false
	}

	// concurrency
	c.callLock.Lock()
	defer c.callLock.Unlock()

	// success path
	if c.call(ctx, method, args, resp, 0) {
		return true
	}

	// failure, retry
	startedAt := time.Now()
	ok := false
	delay := c.CallRetryDelay
	c.Mach.Add1(ssC.RetryingCall, Pass(&A{
		Method:    methodName,
		StartedAt: startedAt,
	}))

	// cleanup
	defer func() {
		if ok {
			c.Mach.Remove1(ssC.RetryingCall, nil)
		} else {
			c.Mach.Add1(ssC.CallRetryFailed, Pass(&A{Method: methodName}))
		}
	}()

	// retry loop
	for i := 0; i < c.CallRetries; i++ {
		// wait for time or exit
		if !amhelp.Wait(ctx, delay) {
			return false
		}

		// wait for state or exit
		<-c.Mach.When1(ssC.Ready, ctx)
		if ctx.Err() != nil {
			return false // expired
		}

		// call, again
		if c.call(ctx, method, args, resp, 0) {
			ok = true
			return true
		}

		// double the delay when backoff set
		if c.CallRetryBackoff > 0 {
			delay *= 2
			if delay > c.CallRetryBackoff {
				delay = c.CallRetryBackoff
			}
		}

		if c.CallRetryTimeout > 0 && time.Since(startedAt) > c.CallRetryTimeout {
			break
		}
	}

	// fail
	return false
}

func (c *Client) call(
	ctx context.Context, method string, args, resp any, timeout time.Duration,
) bool {
	defer c.Mach.PanicToErr(nil)
	methodName := ServerMethods.Parse(method).Value

	// call
	c.CallCount++
	if timeout == 0 {
		timeout = c.CallTimeout
	}
	callCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	err := c.rpc.CallWithContext(callCtx, method, args, resp)
	if ctx.Err() != nil {
		return false // expired
	}

	// err timeout
	if callCtx.Err() != nil {
		c.Mach.AddErrState(ssC.ErrNetworkTimeout, callCtx.Err(), nil)
		return false
	}
	// err test
	if c.tmpTestErr != nil {
		AddErrNetwork(nil, c.Mach, fmt.Errorf("%w: %s", c.tmpTestErr, methodName))
		c.tmpTestErr = nil
		return false
	}
	// err test
	if c.permTestErr != nil {
		AddErrNetwork(nil, c.Mach, fmt.Errorf("%w: %s", c.permTestErr, methodName))
		return false
	}
	// err
	if err != nil {
		// TODO specific err?
		AddErr(nil, c.Mach, methodName, err)
		return false
	}

	return true
}

func (c *Client) notifyFailsafe(
	ctx context.Context, method string, args any,
) bool {
	methodName := ServerMethods.Parse(method).Value

	// validate
	if c.rpc == nil {
		AddErrNoConn(nil, c.Mach, errors.New(methodName))
		return false
	}

	// concurrency
	c.callLock.Lock()
	defer c.callLock.Unlock()

	// success path
	if c.notify(ctx, method, args) {
		return true
	}

	// failure, retry
	startedAt := time.Now()
	ok := false
	delay := c.CallRetryDelay
	c.Mach.Add1(ssC.RetryingCall, Pass(&A{
		Method:    methodName,
		StartedAt: startedAt,
	}))

	// cleanup
	defer func() {
		if ok {
			c.Mach.Remove1(ssC.RetryingCall, nil)
		} else {
			c.Mach.Add1(ssC.CallRetryFailed, Pass(&A{Method: methodName}))
		}
	}()

	// retry loop
	for i := 0; i < c.CallRetries; i++ {
		time.Sleep(delay)

		// call, again
		if c.notify(ctx, method, args) {
			ok = true
			return true
		}

		// double the delay when backoff set
		if c.CallRetryBackoff > 0 {
			delay *= 2
			if delay > c.CallRetryBackoff {
				delay = c.CallRetryBackoff
			}
		}

		if c.CallRetryTimeout > 0 && time.Since(startedAt) > c.CallRetryTimeout {
			break
		}
	}

	// fail
	return false
}

func (c *Client) notify(
	ctx context.Context, method string, args any,
) bool {
	defer c.Mach.PanicToErr(nil)
	methodName := ServerMethods.Parse(method).Value

	// timeout
	err := c.conn.SetDeadline(time.Now().Add(c.CallTimeout))
	if err != nil {
		AddErr(nil, c.Mach, methodName, err)
		return false
	}

	// call
	c.CallCount++
	err = c.rpc.Notify(method, args)
	if ctx.Err() != nil {
		return false // expired
	}

	// err
	if err != nil {
		AddErr(nil, c.Mach, methodName, err)
		return false
	}

	// remove timeout
	err = c.conn.SetDeadline(time.Time{})
	if err != nil {
		AddErr(nil, c.Mach, methodName, err)
		return false
	}

	return true
}

// ///// ///// /////

// ///// REMOTE METHODS

// ///// ///// /////

// RemoteSetClock updates the client's clock. Only called by the server.
func (c *Client) RemoteSetClock(
	_ *rpc2.Client, msg *ClockMsg, _ *Empty,
) error {
	// validate
	if msg == nil {
		AddErrParams(nil, c.Mach, nil)
		return nil
	}

	// execute
	c.updateClock(msg, nil, 0)

	return nil
}

// RemotePushAllTicks logs all the machine clock's ticks, so all final handlers
// can be executed in order. Only called by the server. TODO
func (c *Client) RemotePushAllTicks(
	_ *rpc2.Client, clocks []PushAllTicks, _ *Empty,
) error {
	// TODO implement, test

	// for _, push := range clocks {
	// 	// validate
	// 	if push.ClockMsg == nil || push.Mutation == nil {
	// 		AddErrParams(nil, c.Mach, nil)
	// 		return nil
	// 	}
	//
	// 	// execute TODO
	// 	// c.InternalUpdateClock(clock, nil)
	// }

	return nil
}

// RemoteSendingPayload triggers the WorkerDelivering state, which is an
// optional indication that the server has started a data transmission to the
// Client. This payload shouldn't contain the data itself, only the name and
// token.
func (c *Client) RemoteSendingPayload(
	_ *rpc2.Client, payload *ArgsPayload, _ *Empty,
) error {
	// TODO test
	c.log("RemoteSendingPayload %s", payload.Name)
	c.Mach.Add1(ssC.WorkerDelivering, Pass(&A{
		Payload: payload,
		Name:    payload.Name,
	}))

	return nil
}

// RemoteSendPayload receives a payload from the server and triggers
// WorkerPayload. The Consumer should bind its handlers and handle this state
// to receive the data.
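//
// A sketch of a Consumer handler receiving the data (MyHandlers and process
// are placeholders):
//
//	func (h *MyHandlers) WorkerPayloadState(e *am.Event) {
//		args := rpc.ParseArgs(e.Args)
//		process(args.Name, args.Payload)
//	}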
func (c *Client) RemoteSendPayload(
	_ *rpc2.Client, payload *ArgsPayload, _ *Empty,
) error {
	c.log("RemoteSendPayload %s:%s", payload.Name, payload.Token)
	c.Mach.Add1(ssC.WorkerPayload, Pass(&A{
		Payload: payload,
		Name:    payload.Name,
	}))

	return nil
}

// RemoteBye is called by the server on a planned disconnect.
// TODO take a reason / source event?
func (c *Client) RemoteBye(
	_ *rpc2.Client, _ *Empty, _ *Empty,
) error {
	// TODO check if this is expected / covers all scenarios
	c.Mach.Remove1(ssC.Start, nil)
	return nil
}

// RemoteSchemaChange is called by the server on a source machine schema change.
func (c *Client) RemoteSchemaChange(
	_ *rpc2.Client, msg *RespHandshake, _ *Empty,
) error {
	c.log("new schema v" + strconv.Itoa(len(msg.Serialized.StateNames)))
	c.updateSchema(msg)
	return nil
}

// ///// ///// /////

// ///// MISC

// ///// ///// /////

type ClientOpts struct {
	// Consumer is an optional state machine which receives payload deliveries
	// from the server via its WorkerPayload state. It has to implement
	// states.ConsumerStatesDef.
	Consumer *am.Machine
	// Parent is a parent state machine for a new Client state machine. See
	// [am.Opts].
	Parent am.Api
}
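
// A sketch of passing options to NewClient (consumerMach is assumed to
// implement states.ConsumerStatesDef; the other arguments are placeholders):
//
//	c, err := NewClient(ctx, "my-client", addr, schema, names, &ClientOpts{
//		Consumer: consumerMach,
//	})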

// GetClientId returns an RPC Client machine ID from a name. This ID is used
// during the handshake with the server.
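//
// For example:
//
//	GetClientId("my-client") // "rc-my-client"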
func GetClientId(name string) string {
	return "rc-" + name
}