// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package arrow

import (
	"fmt"
	"sync/atomic"

	"github.com/apache/arrow-go/v18/arrow/internal/debug"
)

// Table represents a logical sequence of chunked arrays of equal length. It is
// similar to a Record except that the columns are ChunkedArrays instead,
// allowing for a Table to be built up by chunks progressively whereas the columns
// in a single Record are always each a single contiguous array.
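//
// A Table is typically constructed from a schema and a slice of columns, for
// example with array.NewTable from the companion array package. The sketch
// below is illustrative; "x" is a placeholder field name and xs stands in for
// an existing int64 arrow.Array:
//
//	schema := arrow.NewSchema([]arrow.Field{
//		{Name: "x", Type: arrow.PrimitiveTypes.Int64},
//	}, nil)
//	col := arrow.NewColumnFromArr(schema.Field(0), xs)
//	defer col.Release()
//	tbl := array.NewTable(schema, []arrow.Column{col}, -1) // -1 infers the row count
//	defer tbl.Release()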
type Table interface {
	Schema() *Schema
	NumRows() int64
	NumCols() int64
	Column(i int) *Column

	// AddColumn adds a new column to the table and a corresponding field (of the same type)
	// to its schema, at the specified position. Returns the new table with updated columns and schema.
	AddColumn(pos int, f Field, c Column) (Table, error)

	Retain()
	Release()

	fmt.Stringer
}

// Column is an immutable column data structure consisting of
// a field (type metadata) and a chunked data array.
//
// To get strongly typed data from a Column, you need to iterate the
// chunks and type assert each individual Array. For example:
//
//	switch column.DataType().ID() {
//	case arrow.INT32:
//		for _, c := range column.Data().Chunks() {
//			arr := c.(*array.Int32)
//			// do something with arr
//		}
//	case arrow.INT64:
//		for _, c := range column.Data().Chunks() {
//			arr := c.(*array.Int64)
//			// do something with arr
//		}
//	case ...
//	}
type Column struct {
	field Field
	data  *Chunked
}

// NewColumnFromArr is a convenience function to create a column from
// a field and a non-chunked array.
//
// This provides a simple mechanism for bypassing the intermediate step of
// constructing a Chunked array of a single chunk and then having to release
// it because of the reference counting.
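//
// A minimal usage sketch (the Int32 builder, allocator, and field name below
// are illustrative, not part of this function's contract):
//
//	bldr := array.NewInt32Builder(memory.DefaultAllocator)
//	defer bldr.Release()
//	bldr.AppendValues([]int32{1, 2, 3}, nil)
//	arr := bldr.NewInt32Array()
//	defer arr.Release()
//
//	col := arrow.NewColumnFromArr(arrow.Field{Name: "x", Type: arrow.PrimitiveTypes.Int32}, arr)
//	defer col.Release()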
func NewColumnFromArr(field Field, arr Array) Column {
	if !TypeEqual(field.Type, arr.DataType()) {
		panic(fmt.Errorf("%w: arrow/array: inconsistent data type %s vs %s", ErrInvalid, field.Type, arr.DataType()))
	}

	arr.Retain()
	col := Column{
		field: field,
		data: &Chunked{
			chunks: []Array{arr},
			length: arr.Len(),
			nulls:  arr.NullN(),
			dtype:  field.Type,
		},
	}
	col.data.refCount.Add(1)
	return col
}

// NewColumn returns a column from a field and a chunked data array.
//
// NewColumn panics if the field's data type is inconsistent with the data type
// of the chunked data array.
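//
// A minimal sketch combining NewChunked and NewColumn (a1 and a2 stand in for
// two existing arrow.Array chunks of the same int64 type):
//
//	chunked := arrow.NewChunked(arrow.PrimitiveTypes.Int64, []arrow.Array{a1, a2})
//	defer chunked.Release()
//	col := arrow.NewColumn(arrow.Field{Name: "x", Type: arrow.PrimitiveTypes.Int64}, chunked)
//	defer col.Release()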
func NewColumn(field Field, chunks *Chunked) *Column {
	col := Column{
		field: field,
		data:  chunks,
	}
	col.data.Retain()

	if !TypeEqual(col.data.DataType(), col.field.Type) {
		col.data.Release()
		panic(fmt.Errorf("%w: arrow/array: inconsistent data type %s vs %s", ErrInvalid, col.data.DataType(), col.field.Type))
	}

	return &col
}

// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
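//
// A common ownership pattern is to retain the column before handing it to
// another goroutine and release it there when done (illustrative sketch):
//
//	col.Retain()
//	go func() {
//		defer col.Release()
//		// read col.Data() ...
//	}()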
func (col *Column) Retain() {
	col.data.Retain()
}

// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (col *Column) Release() {
	col.data.Release()
}

func (col *Column) Len() int           { return col.data.Len() }
func (col *Column) NullN() int         { return col.data.NullN() }
func (col *Column) Data() *Chunked     { return col.data }
func (col *Column) Field() Field       { return col.field }
func (col *Column) Name() string       { return col.field.Name }
func (col *Column) DataType() DataType { return col.field.Type }

// Chunked manages a collection of primitive arrays as one logical large array.
type Chunked struct {
	refCount atomic.Int64

	chunks []Array

	length int
	nulls  int
	dtype  DataType
}

// NewChunked returns a new chunked array from the slice of arrays.
//
// NewChunked panics if the chunks do not have the same data type.
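//
// An illustrative sketch (a1 and a2 stand in for two existing arrow.Array
// chunks of the same Int32 type):
//
//	chunked := arrow.NewChunked(arrow.PrimitiveTypes.Int32, []arrow.Array{a1, a2})
//	defer chunked.Release()
//	total := chunked.Len()   // sum of the chunk lengths
//	nulls := chunked.NullN() // sum of the chunks' null counts
//	first := chunked.Chunk(0)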
func NewChunked(dtype DataType, chunks []Array) *Chunked {
	arr := &Chunked{
		chunks: make([]Array, 0, len(chunks)),
		dtype:  dtype,
	}
	arr.refCount.Add(1)

	for _, chunk := range chunks {
		if chunk == nil {
			continue
		}

		if !TypeEqual(chunk.DataType(), dtype) {
			panic(fmt.Errorf("%w: arrow/array: mismatch data type %s vs %s", ErrInvalid, chunk.DataType().String(), dtype.String()))
		}
		chunk.Retain()
		arr.chunks = append(arr.chunks, chunk)
		arr.length += chunk.Len()
		arr.nulls += chunk.NullN()
	}
	return arr
}

// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (a *Chunked) Retain() {
	a.refCount.Add(1)
}

// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (a *Chunked) Release() {
	debug.Assert(a.refCount.Load() > 0, "too many releases")

	if a.refCount.Add(-1) == 0 {
		for _, arr := range a.chunks {
			arr.Release()
		}
		a.chunks = nil
		a.length = 0
		a.nulls = 0
	}
}

func (a *Chunked) Len() int           { return a.length }
func (a *Chunked) NullN() int         { return a.nulls }
func (a *Chunked) DataType() DataType { return a.dtype }
func (a *Chunked) Chunks() []Array    { return a.chunks }
func (a *Chunked) Chunk(i int) Array  { return a.chunks[i] }