package iceberg

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"text/template"
	"time"

	"github.com/hamba/avro/v2/ocf"
)

// NewManifestEntryV1 returns a V1 manifest entry that records the given data
// file under the given snapshot with the given status.
func NewManifestEntryV1(entryStatus ManifestEntryStatus, snapshotID int64, data DataFile) ManifestEntry {
	return &manifestEntryV1{
		EntryStatus: entryStatus,
		Snapshot:    snapshotID,
		Data:        data,
	}
}

// DataFileBuilder incrementally builds a DataFile; create one with
// NewDataFileBuilder and finish with Build.
type DataFileBuilder struct {
	*dataFile
}

// NewDataFileBuilder returns a DataFileBuilder with the required data file
// fields set; column-level statistics can be added with the With* methods
// before calling Build.
func NewDataFileBuilder(
	path string,
	format FileFormat,
	partitionData map[string]any,
	recordCount int64,
	fileSizeBytes int64,
) DataFileBuilder {
	return DataFileBuilder{
		dataFile: &dataFile{
			Path:          path,
			Format:        format,
			PartitionData: partitionData,
			RecordCount:   recordCount,
			FileSize:      fileSizeBytes,
		},
	}
}

// Build returns the assembled DataFile.
func (b DataFileBuilder) Build() DataFile {
	return b.dataFile
}

// WithColumnSizes sets the per-column size (in bytes) statistics, keyed by field ID.
func (b DataFileBuilder) WithColumnSizes(sizes map[int]int64) DataFileBuilder {
	b.ColSizes = avroColMapFromMap[int, int64](sizes)
	return b
}

// WithValueCounts sets the per-column value count statistics, keyed by field ID.
func (b DataFileBuilder) WithValueCounts(counts map[int]int64) DataFileBuilder {
	b.ValCounts = avroColMapFromMap[int, int64](counts)
	return b
}

// WithNullValueCounts sets the per-column null value count statistics, keyed by field ID.
func (b DataFileBuilder) WithNullValueCounts(counts map[int]int64) DataFileBuilder {
	b.NullCounts = avroColMapFromMap[int, int64](counts)
	return b
}

// WithNaNValueCounts sets the per-column NaN value count statistics, keyed by field ID.
func (b DataFileBuilder) WithNaNValueCounts(counts map[int]int64) DataFileBuilder {
	b.NaNCounts = avroColMapFromMap[int, int64](counts)
	return b
}

// WithDistinctCounts sets the per-column distinct value count statistics, keyed by field ID.
func (b DataFileBuilder) WithDistinctCounts(counts map[int]int64) DataFileBuilder {
	b.DistinctCounts = avroColMapFromMap[int, int64](counts)
	return b
}

// WithLowerBounds sets the per-column lower bound values, keyed by field ID.
func (b DataFileBuilder) WithLowerBounds(bounds map[int][]byte) DataFileBuilder {
	b.LowerBounds = avroColMapFromMap[int, []byte](bounds)
	return b
}

// WithUpperBounds sets the per-column upper bound values, keyed by field ID.
func (b DataFileBuilder) WithUpperBounds(bounds map[int][]byte) DataFileBuilder {
	b.UpperBounds = avroColMapFromMap[int, []byte](bounds)
	return b
}

// WithKeyMetadata sets the encryption key metadata for the data file.
func (b DataFileBuilder) WithKeyMetadata(key []byte) DataFileBuilder {
	b.Key = &key
	return b
}

// WithSplitOffsets sets the split offsets for the data file.
func (b DataFileBuilder) WithSplitOffsets(splits []int64) DataFileBuilder {
	b.Splits = &splits
	return b
}

// WithSortOrderID sets the sort order ID for the data file.
func (b DataFileBuilder) WithSortOrderID(id int) DataFileBuilder {
	b.SortOrder = &id
	return b
}
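
// A minimal, illustrative sketch of the builder together with
// NewManifestEntryV1. The path, partition data, counts, snapshotID, and the
// EntryStatusADDED / ParquetFile constant names are assumptions for the sake
// of the example, not taken from this file:
//
//	df := NewDataFileBuilder(
//		"s3://bucket/table/data/00000.parquet", // hypothetical path
//		ParquetFile,                            // assumed FileFormat constant
//		map[string]any{"date": "2024-01-01"},   // hypothetical partition data
//		1000,                                   // record count
//		4096,                                   // file size in bytes
//	).
//		WithColumnSizes(map[int]int64{1: 2048}).
//		WithNullValueCounts(map[int]int64{1: 0}).
//		Build()
//
//	entry := NewManifestEntryV1(EntryStatusADDED, snapshotID, df)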

// WriteManifestListV1 writes the given manifest files to w as a V1 Avro (OCF)
// manifest list, compressed with deflate.
func WriteManifestListV1(w io.Writer, files []ManifestFile) error {
	enc, err := ocf.NewEncoder(
		AvroManifestListV1Schema,
		w,
		ocf.WithMetadata(map[string][]byte{
			"avro.codec": []byte("deflate"),
		}),
		ocf.WithCodec(ocf.Deflate),
	)
	if err != nil {
		return err
	}
	defer enc.Close()

	for _, file := range files {
		if err := enc.Encode(file); err != nil {
			return err
		}
	}

	return nil
}
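
// Illustrative usage sketch (the files slice is assumed to have been built
// elsewhere, e.g. from the manifest files of an existing snapshot):
//
//	var buf bytes.Buffer
//	if err := WriteManifestListV1(&buf, files); err != nil {
//		return err
//	}
//	// buf now contains a deflate-compressed Avro OCF manifest list.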

// WriteManifestV1 writes the given manifest entries to w as a V1 Avro (OCF)
// manifest file, embedding the JSON-serialized table schema in the file
// metadata.
func WriteManifestV1(w io.Writer, schema *Schema, entries []ManifestEntry) error {
	schemaJSON, err := json.Marshal(schema)
	if err != nil {
		return fmt.Errorf("failed to marshal schema: %w", err)
	}

	enc, err := ocf.NewEncoder(
		AvroSchemaFromEntriesV1(entries),
		w,
		ocf.WithMetadata(map[string][]byte{
			"format-version": []byte("1"),
			"schema":         schemaJSON,
			//"partition-spec": []byte("todo"), // TODO
			"avro.codec": []byte("deflate"),
		}),
		ocf.WithCodec(ocf.Deflate),
	)
	if err != nil {
		return err
	}
	defer enc.Close()

	for _, entry := range entries {
		if err := enc.Encode(entry); err != nil {
			return err
		}
	}

	return nil
}
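
// Illustrative usage sketch (schema and entries are assumed to come from the
// table metadata and from NewManifestEntryV1 respectively):
//
//	var buf bytes.Buffer
//	if err := WriteManifestV1(&buf, schema, entries); err != nil {
//		return err
//	}
//	// buf now contains a V1 Avro manifest with the table schema embedded in
//	// its file metadata.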

// AvroSchemaFromEntriesV1 creates an Avro schema from the given manifest entries.
// The entries must all share the same partition spec.
func AvroSchemaFromEntriesV1(entries []ManifestEntry) string {
	partition := entries[0].DataFile().Partition() // Pull the first entry's partition spec since it is expected to be the same for all entries.
	partitionFieldID := 1000                       // According to the spec, partition field IDs start at 1000. https://iceberg.apache.org/spec/#partition-evolution
	buf := &bytes.Buffer{}
	if err := template.Must(
		template.New("EntryV1Schema").
			Funcs(template.FuncMap{
				"PartitionFieldID": func(any) int {
					id := partitionFieldID
					partitionFieldID++
					return id
				},
				"Type": func(v any) string {
					switch t := v.(type) {
					case string:
						return `["null", "string"]`
					case int:
						return `["null", "int"]`
					case int64:
						return `["null", "long"]`
					case []byte:
						return `["null", "bytes"]`
					case time.Time:
						return `["null", {"type": "int", "logicalType": "date"}]`
					default:
						panic(fmt.Sprintf("unsupported type %T", t))
					}
				},
			}).
			Parse(AvroEntryV1SchemaTmpl)).Execute(buf, partition); err != nil {
		panic(err)
	}

	return buf.String()
}