diff --git a/pkg/cmd/trace.go b/pkg/cmd/trace.go index f1fa65f3..993568bf 100644 --- a/pkg/cmd/trace.go +++ b/pkg/cmd/trace.go @@ -177,7 +177,7 @@ func summaryStats(tr trace.Trace) { // contents of a given column. type ColSummariser struct { name string - summary func(trace.Column) string + summary func(*trace.Column) string } var colSummarisers []ColSummariser = []ColSummariser{ @@ -187,15 +187,15 @@ var colSummarisers []ColSummariser = []ColSummariser{ {"unique", uniqueSummariser}, } -func rowSummariser(col trace.Column) string { +func rowSummariser(col *trace.Column) string { return fmt.Sprintf("%d rows", col.Data().Len()) } -func widthSummariser(col trace.Column) string { +func widthSummariser(col *trace.Column) string { return fmt.Sprintf("%d bits", col.Data().BitWidth()) } -func bytesSummariser(col trace.Column) string { +func bytesSummariser(col *trace.Column) string { bitwidth := col.Data().BitWidth() byteWidth := bitwidth / 8 // Determine proper bytewidth @@ -206,7 +206,7 @@ func bytesSummariser(col trace.Column) string { return fmt.Sprintf("%d bytes", col.Data().Len()*byteWidth) } -func uniqueSummariser(col trace.Column) string { +func uniqueSummariser(col *trace.Column) string { data := col.Data() elems := util.NewHashSet[util.BytesKey](data.Len() / 2) // Add all the elements diff --git a/pkg/schema/assignment/byte_decomposition.go b/pkg/schema/assignment/byte_decomposition.go index 3e230377..f18b7217 100644 --- a/pkg/schema/assignment/byte_decomposition.go +++ b/pkg/schema/assignment/byte_decomposition.go @@ -59,38 +59,35 @@ func (p *ByteDecomposition) IsComputed() bool { // Assignment Interface // ============================================================================ -// ExpandTrace expands a given trace to include the columns specified by a given -// ByteDecomposition. This requires computing the value of each byte column in -// the decomposition. 
-func (p *ByteDecomposition) ExpandTrace(tr trace.Trace) error { +// ComputeColumns computes the values of columns defined by this assignment. +// This requires computing the value of each byte column in the decomposition. +func (p *ByteDecomposition) ComputeColumns(tr trace.Trace) ([]*trace.Column, error) { columns := tr.Columns() // Calculate how many bytes required. n := len(p.targets) // Identify source column source := columns.Get(p.source) + // Determine padding values + padding := decomposeIntoBytes(source.Padding(), n) // Construct byte column data - cols := make([]util.FrArray, n) + cols := make([]*trace.Column, n) // Initialise columns for i := 0; i < n; i++ { + ith := p.targets[i] + // Construct a byte array for ith byte + data := util.NewFrArray(source.Height(), 8) // Construct a byte column for ith byte - cols[i] = util.NewFrArray(source.Height(), 8) + cols[i] = trace.NewColumn(ith.Context(), ith.Name(), data, padding[i]) } // Decompose each row of each column for i := uint(0); i < source.Height(); i = i + 1 { ith := decomposeIntoBytes(source.Get(int(i)), n) for j := 0; j < n; j++ { - cols[j].Set(i, ith[j]) + cols[j].Data().Set(i, ith[j]) } } - // Determine padding values - padding := decomposeIntoBytes(source.Padding(), n) - // Finally, add byte columns to trace - for i := 0; i < n; i++ { - ith := p.targets[i] - columns.Add(ith.Context(), ith.Name(), cols[i], padding[i]) - } // Done - return nil + return cols, nil } // RequiredSpillage returns the minimum amount of spillage required to ensure diff --git a/pkg/schema/assignment/computed_column.go b/pkg/schema/assignment/computed_column.go index 8836093e..08b781bd 100644 --- a/pkg/schema/assignment/computed_column.go +++ b/pkg/schema/assignment/computed_column.go @@ -72,15 +72,15 @@ func (p *ComputedColumn[E]) RequiredSpillage() uint { return p.expr.Bounds().End } -// ExpandTrace attempts to a new column to the trace which contains the result -// of evaluating a given expression on each row. 
If the column already exists, -// then an error is flagged. -func (p *ComputedColumn[E]) ExpandTrace(tr trace.Trace) error { +// ComputeColumns computes the values of columns defined by this assignment. +// Specifically, this creates a new column which contains the result of +// evaluating a given expression on each row. +func (p *ComputedColumn[E]) ComputeColumns(tr trace.Trace) ([]*trace.Column, error) { columns := tr.Columns() // Check whether a column already exists with the given name. if _, ok := columns.IndexOf(p.target.Context().Module(), p.Name()); ok { mod := tr.Modules().Get(p.target.Context().Module()) - return fmt.Errorf("computed column already exists ({%s.%s})", mod.Name(), p.Name()) + return nil, fmt.Errorf("computed column already exists ({%s.%s})", mod.Name(), p.Name()) } // Extract length multipiler multiplier := p.target.Context().LengthMultiplier() @@ -102,10 +102,10 @@ func (p *ComputedColumn[E]) ExpandTrace(tr trace.Trace) error { // that all columns return their padding value which is then used to compute // the padding value for *this* column. padding := p.expr.EvalAt(-1, tr) - // Colunm needs to be expanded. - columns.Add(p.target.Context(), p.Name(), data, padding) + // Construct column + col := trace.NewColumn(p.target.Context(), p.Name(), data, padding) // Done - return nil + return []*trace.Column{col}, nil } // Dependencies returns the set of columns that this assignment depends upon. 
diff --git a/pkg/schema/assignment/interleave.go b/pkg/schema/assignment/interleave.go index 6d3529dc..1b28d734 100644 --- a/pkg/schema/assignment/interleave.go +++ b/pkg/schema/assignment/interleave.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/consensys/go-corset/pkg/schema" + "github.com/consensys/go-corset/pkg/trace" tr "github.com/consensys/go-corset/pkg/trace" "github.com/consensys/go-corset/pkg/util" ) @@ -65,10 +66,10 @@ func (p *Interleaving) RequiredSpillage() uint { return uint(0) } -// ExpandTrace expands a given trace to include the columns specified by a given -// Interleaving. This requires copying the data in the source columns to create -// the interleaved column. -func (p *Interleaving) ExpandTrace(tr tr.Trace) error { +// ComputeColumns computes the values of columns defined by this assignment. +// This requires copying the data in the source columns to create the +// interleaved column. +func (p *Interleaving) ComputeColumns(tr trace.Trace) ([]*trace.Column, error) { columns := tr.Columns() ctx := p.target.Context() // Byte width records the largest width of any column. @@ -80,7 +81,7 @@ func (p *Interleaving) ExpandTrace(tr tr.Trace) error { bit_width = max(bit_width, ith.Type().BitWidth()) // Sanity check no column already exists with this name. if _, ok := columns.IndexOf(ctx.Module(), ith.Name()); ok { - return fmt.Errorf("interleaved column already exists ({%s})", ith.Name()) + return nil, fmt.Errorf("interleaved column already exists ({%s})", ith.Name()) } } // Determine interleaving width @@ -110,9 +111,9 @@ func (p *Interleaving) ExpandTrace(tr tr.Trace) error { // column in the interleaving. padding := columns.Get(0).Padding() // Colunm needs to be expanded. - columns.Add(ctx, p.target.Name(), data, padding) + col := trace.NewColumn(ctx, p.target.Name(), data, padding) // - return nil + return []*trace.Column{col}, nil } // Dependencies returns the set of columns that this assignment depends upon. 
diff --git a/pkg/schema/assignment/lexicographic_sort.go b/pkg/schema/assignment/lexicographic_sort.go index ed68da0f..6d78a1ff 100644 --- a/pkg/schema/assignment/lexicographic_sort.go +++ b/pkg/schema/assignment/lexicographic_sort.go @@ -67,46 +67,49 @@ func (p *LexicographicSort) RequiredSpillage() uint { return uint(0) } -// ExpandTrace adds columns as needed to support the LexicographicSortingGadget. -// That includes the delta column, and the bit selectors. -func (p *LexicographicSort) ExpandTrace(tr trace.Trace) error { +// ComputeColumns computes the values of columns defined as needed to support +// the LexicographicSortingGadget. That includes the delta column, and the bit +// selectors. +func (p *LexicographicSort) ComputeColumns(tr trace.Trace) ([]*trace.Column, error) { columns := tr.Columns() zero := fr.NewElement(0) one := fr.NewElement(1) + first := p.targets[0] // Exact number of columns involved in the sort - ncols := len(p.sources) + nbits := len(p.sources) // multiplier := p.context.LengthMultiplier() // Determine how many rows to be constrained. nrows := tr.Modules().Get(p.context.Module()).Height() * multiplier // Initialise new data columns - bit := make([]util.FrArray, ncols) + cols := make([]*trace.Column, nbits+1) // Byte width records the largest width of any column. bit_width := uint(0) - - for i := 0; i < ncols; i++ { - // TODO: following can be optimised to use a single bit per element, - // rather than an entire byte. 
- bit[i] = util.NewFrArray(nrows, 1) - ith := columns.Get(p.sources[i]) - bit_width = max(bit_width, ith.Data().BitWidth()) - } - + // Initialise bit columns and determine maximum bit width + for i := 0; i < nbits; i++ { + target := p.targets[1+i] + source := columns.Get(p.sources[i]) + data := util.NewFrArray(nrows, 1) + cols[i+1] = trace.NewColumn(target.Context(), target.Name(), data, &zero) + bit_width = max(bit_width, source.Data().BitWidth()) + } + // Construct delta column using the computed bit width + delta := util.NewFrArray(nrows, bit_width) + cols[0] = trace.NewColumn(first.Context(), first.Name(), delta, &zero) for i := uint(0); i < nrows; i++ { set := false // Initialise delta to zero delta.Set(i, &zero) // Decide which row is the winner (if any) - for j := 0; j < ncols; j++ { + for j := 0; j < nbits; j++ { prev := columns.Get(p.sources[j]).Get(int(i - 1)) curr := columns.Get(p.sources[j]).Get(int(i)) if !set && prev != nil && prev.Cmp(curr) != 0 { var diff fr.Element - bit[j].Set(i, &one) + cols[j+1].Data().Set(i, &one) // Compute curr - prev if p.signs[j] { diff.Set(curr) @@ -118,20 +121,12 @@ func (p *LexicographicSort) ExpandTrace(tr trace.Trace) error { set = true } else { - bit[j].Set(i, &zero) + cols[j+1].Data().Set(i, &zero) } } } - // Add delta column data - first := p.targets[0] - columns.Add(first.Context(), first.Name(), delta, &zero) - // Add bit column data - for i := 0; i < ncols; i++ { - ith := p.targets[1+i] - columns.Add(ith.Context(), ith.Name(), bit[i], &zero) - } // Done. - return nil + return cols, nil } // Dependencies returns the set of columns that this assignment depends upon. diff --git a/pkg/schema/assignment/sorted_permutation.go b/pkg/schema/assignment/sorted_permutation.go index bdd3831e..387157e1 100644 --- a/pkg/schema/assignment/sorted_permutation.go +++ b/pkg/schema/assignment/sorted_permutation.go @@ -117,42 +117,42 @@ func (p *SortedPermutation) RequiredSpillage() uint { return uint(0) } -// ExpandTrace expands a given trace to include the columns specified by a given -// SortedPermutation. 
This requires copying the data in the source columns, and -// sorting that data according to the permutation criteria. -func (p *SortedPermutation) ExpandTrace(tr tr.Trace) error { +// ComputeColumns computes the values of columns defined by this assignment. +// This requires copying the data in the source columns, and sorting that data +// according to the permutation criteria. +func (p *SortedPermutation) ComputeColumns(tr trace.Trace) ([]*trace.Column, error) { columns := tr.Columns() // Ensure target columns don't exist for i := p.Columns(); i.HasNext(); { name := i.Next().Name() // Sanity check no column already exists with this name. if _, ok := columns.IndexOf(p.context.Module(), name); ok { - return fmt.Errorf("permutation column already exists ({%s})", name) + return nil, fmt.Errorf("permutation column already exists ({%s})", name) } } - cols := make([]util.FrArray, len(p.sources)) + data := make([]util.FrArray, len(p.sources)) // Construct target columns for i := 0; i < len(p.sources); i++ { src := p.sources[i] // Read column data - data := columns.Get(src).Data() + src_data := columns.Get(src).Data() // Clone it to initialise permutation. - cols[i] = data.Clone() + data[i] = src_data.Clone() } // Sort target columns - util.PermutationSort(cols, p.signs) - // Physically add the columns - index := 0 - - for i := p.Columns(); i.HasNext(); index++ { - ith := i.Next() + util.PermutationSort(data, p.signs) + // Physically construct the columns + cols := make([]*trace.Column, len(p.sources)) + // + for i, iter := 0, p.Columns(); iter.HasNext(); i++ { + ith := iter.Next() dstColName := ith.Name() - srcCol := tr.Columns().Get(p.sources[index]) - columns.Add(ith.Context(), dstColName, cols[index], srcCol.Padding()) + srcCol := tr.Columns().Get(p.sources[i]) + cols[i] = trace.NewColumn(ith.Context(), dstColName, data[i], srcCol.Padding()) } // - return nil + return cols, nil } // Dependencies returns the set of columns that this assignment depends upon. 
diff --git a/pkg/schema/schema.go b/pkg/schema/schema.go index de02371a..d77a8796 100644 --- a/pkg/schema/schema.go +++ b/pkg/schema/schema.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/consensys/gnark-crypto/ecc/bls12-377/fr" + "github.com/consensys/go-corset/pkg/trace" tr "github.com/consensys/go-corset/pkg/trace" "github.com/consensys/go-corset/pkg/util" ) @@ -53,11 +54,12 @@ type Declaration interface { type Assignment interface { Declaration - // ExpandTrace expands a given trace to include "computed - // columns". These are columns which do not exist in the - // original trace, but are added during trace expansion to - // form the final trace. - ExpandTrace(tr.Trace) error + // ComputeColumns computes the values of columns defined by this assignment. + // In order for this computation to makes sense, all columns on which this + // assignment depends must exist (e.g. are either inputs or have been + // computed already). Computed columns do not exist in the original trace, + // but are added during trace expansion to form the final trace. + ComputeColumns(tr.Trace) ([]*trace.Column, error) // RequiredSpillage returns the minimum amount of spillage required to ensure // valid traces are accepted in the presence of arbitrary padding. Note, // spillage is currently assumed to be required only at the front of a diff --git a/pkg/schema/schemas.go b/pkg/schema/schemas.go index 34a33213..6f613457 100644 --- a/pkg/schema/schemas.go +++ b/pkg/schema/schemas.go @@ -1,10 +1,7 @@ package schema import ( - "fmt" - tr "github.com/consensys/go-corset/pkg/trace" - "github.com/consensys/go-corset/pkg/util" ) // JoinContexts combines one or more evaluation contexts together. If all @@ -64,19 +61,30 @@ func RequiredSpillage(schema Schema) uint { // Observe that assignments have to be computed in the correct order. 
func ExpandTrace(schema Schema, trace tr.Trace) error { index := schema.InputColumns().Count() - m := schema.Assignments().Count() - batchjobs := make([]expandTraceJob, m) + //m := schema.Assignments().Count() + //batchjobs := make([]expandTraceJob, m) // Compute each assignment in turn for i, j := schema.Assignments(), uint(0); i.HasNext(); j++ { // Get ith assignment ith := i.Next() // Compute ith assignment(s) - batchjobs[j] = expandTraceJob{index, ith, trace} + //batchjobs[j] = expandTraceJob{index, ith, trace} + cols, err := ith.ComputeColumns(trace) + // Check error + if err != nil { + return err + } + // Add all columns + for k := 0; k < len(cols); k++ { + kth := cols[k] + trace.Columns().Add(kth.Context(), kth.Name(), kth.Data(), kth.Padding()) + } // Update index index += ith.Columns().Count() } // - return util.ParExec[expandTraceJob](batchjobs) + // return util.ParExec[expandTraceJob](batchjobs) + return nil } // Accepts determines whether this schema will accept a given trace. That @@ -119,48 +127,3 @@ func ColumnIndexOf(schema Schema, module uint, name string) (uint, bool) { return c.Context().Module() == module && c.Name() == name }) } - -// ---------------------------------------------------------------------------- - -// ExpandTraceJob represents a unit of work which can be parallelised during -// trace expansion. N Specifically, the unit of work is a single assignment. In -// the terminology of ParExec, an assignment is a batch of jobs, each of which -// assigns values to a given column. -type expandTraceJob struct { - // Index of first column in the assignment - index uint - // Assignment itself being computed - assignment Assignment - // Trace being expanded - trace tr.Trace -} - -// Jobs returns the underlying jobs that this "trace job" computes. -// Specifically, it identifies the columns that the trace job completes. 
-func (p expandTraceJob) Jobs() []uint { - n := p.assignment.Columns().Count() - cols := make([]uint, n) - // TODO: this is really a hack for now. I think we should use some kind of - // range iterator. - for i := uint(0); i < n; i++ { - cols[i] = p.index + i - } - - return cols -} - -// Dependencies returns the columns that this batch job depends upon. That is -// the set of source columns (if any) required before this job can be completed. -func (p expandTraceJob) Dependencies() []uint { - return p.assignment.Dependencies() -} - -// Run computes the coumns values for a given assignment. -func (p expandTraceJob) Run() error { - n := p.trace.Columns().Len() - if n != p.index { - panic(fmt.Sprintf("internal failure (%d trace columns, versus index %d)", n, p.index)) - } - // - return p.assignment.ExpandTrace(p.trace) -} diff --git a/pkg/trace/array_trace.go b/pkg/trace/array_trace.go index 6c845d4b..04dd662c 100644 --- a/pkg/trace/array_trace.go +++ b/pkg/trace/array_trace.go @@ -14,7 +14,7 @@ type ArrayTrace struct { // Holds the complete set of columns in this trace. The index of each // column in this array uniquely identifies it, and is referred to as the // "column index". - columns []*ArrayTraceColumn + columns []*Column // Holds the complete set of modules in this trace. The index of each // module in this array uniquely identifies it, and is referred to as the // "module index". @@ -30,7 +30,7 @@ func (p *ArrayTrace) Columns() ColumnSet { // Clone creates an identical clone of this trace. 
func (p *ArrayTrace) Clone() Trace { clone := new(ArrayTrace) - clone.columns = make([]*ArrayTraceColumn, len(p.columns)) + clone.columns = make([]*Column, len(p.columns)) clone.modules = make([]Module, len(p.modules)) // Clone modules for i, m := range p.modules { @@ -38,7 +38,7 @@ func (p *ArrayTrace) Clone() Trace { } // Clone columns for i, c := range p.columns { - clone.columns[i] = NewArrayTraceColumn(c.context, c.name, c.data.Clone(), c.padding) + clone.columns[i] = NewColumn(c.context, c.name, c.data.Clone(), c.padding) } // done return clone @@ -112,7 +112,7 @@ func (p arrayTraceColumnSet) Add(ctx Context, name string, data util.FrArray, pa } // Proceed index := uint(len(p.trace.columns)) - p.trace.columns = append(p.trace.columns, NewArrayTraceColumn(ctx, name, data, padding)) + p.trace.columns = append(p.trace.columns, NewColumn(ctx, name, data, padding)) // Register column with enclosing module m.registerColumn(index) // Done @@ -120,7 +120,7 @@ func (p arrayTraceColumnSet) Add(ctx Context, name string, data util.FrArray, pa } // Get returns the ith column in this column set. -func (p arrayTraceColumnSet) Get(index uint) Column { +func (p arrayTraceColumnSet) Get(index uint) *Column { return p.trace.columns[index] } @@ -236,85 +236,3 @@ func (p arrayTraceModuleSet) reseatColumns(mid uint, columns []uint) { p.trace.columns[c].reseat(mid) } } - -// ============================================================================ -// ArrayTraceColumn -// ============================================================================ - -// ArrayTraceColumn represents a column of data within a trace where each row is -// stored directly as a field element. This is the simplest form of column, -// which provides the fastest Get operation (i.e. because it just reads the -// field element out directly). However, at the same time, it can potentially -// use quite a lot of memory. 
In particular, when there are many different -// field elements which have smallish values then this requires excess data. -type ArrayTraceColumn struct { - // Evaluation context of this column - context Context - // Holds the name of this column - name string - // Holds the raw data making up this column - data util.FrArray - // Value to be used when padding this column - padding *fr.Element -} - -// NewArrayTraceColumn constructs a ArrayTraceColumn with the give name, data and padding. -func NewArrayTraceColumn(context Context, name string, data util.FrArray, - padding *fr.Element) *ArrayTraceColumn { - // Sanity check data length - if data.Len()%context.LengthMultiplier() != 0 { - panic("data length has incorrect multiplier") - } - // Done - return &ArrayTraceColumn{context, name, data, padding} -} - -// Context returns the evaluation context this column provides. -func (p *ArrayTraceColumn) Context() Context { - return p.context -} - -// Name returns the name of the given column. -func (p *ArrayTraceColumn) Name() string { - return p.name -} - -// Height determines the height of this column. -func (p *ArrayTraceColumn) Height() uint { - return p.data.Len() -} - -// Padding returns the value which will be used for padding this column. -func (p *ArrayTraceColumn) Padding() *fr.Element { - return p.padding -} - -// Data provides access to the underlying data of this column -func (p *ArrayTraceColumn) Data() util.FrArray { - return p.data -} - -// Get the value at a given row in this column. If the row is -// out-of-bounds, then the column's padding value is returned instead. -// Thus, this function always succeeds. 
-func (p *ArrayTraceColumn) Get(row int) *fr.Element { - if row < 0 || uint(row) >= p.data.Len() { - // out-of-bounds access - return p.padding - } - // in-bounds access - return p.data.Get(uint(row)) -} - -func (p *ArrayTraceColumn) pad(n uint) { - // Apply the length multiplier - n = n * p.context.LengthMultiplier() - // Pad front of array - p.data = p.data.PadFront(n, p.padding) -} - -// Reseat updates the module index of this column (e.g. as a result of a -// realignment). -func (p *ArrayTraceColumn) reseat(mid uint) { - p.context = NewContext(mid, p.context.LengthMultiplier()) -} diff --git a/pkg/trace/builder.go b/pkg/trace/builder.go index e206f98d..63ce3719 100644 --- a/pkg/trace/builder.go +++ b/pkg/trace/builder.go @@ -19,7 +19,7 @@ type Builder struct { // Mapping from name to module index modmap map[string]uint // Set of known columns - columns []*ArrayTraceColumn + columns []*Column } // NewBuilder constructs an empty builder which can then be used to build a new @@ -27,7 +27,7 @@ type Builder struct { func NewBuilder() *Builder { modules := make([]Module, 0) modmap := make(map[string]uint, 0) - columns := make([]*ArrayTraceColumn, 0) + columns := make([]*Column, 0) // Initially empty environment return &Builder{modules, modmap, columns} } @@ -58,7 +58,7 @@ func (p *Builder) Add(name string, padding *fr.Element, data util.FrArray) error // where we are importing expanded traces, then this might not be true. context := NewContext(mid, 1) // Register new column. - return p.registerColumn(NewArrayTraceColumn(context, colname, data, padding)) + return p.registerColumn(NewColumn(context, colname, data, padding)) } // HasModule checks whether a given module has already been registered with this @@ -101,7 +101,7 @@ func (p *Builder) splitQualifiedColumnName(name string) (string, string) { // RegisterColumn registers a new column with this builder. 
An error can arise // if the column's module does not exist, or if the column's height does not // match that of its enclosing module. -func (p *Builder) registerColumn(col *ArrayTraceColumn) error { +func (p *Builder) registerColumn(col *Column) error { mid := col.Context().Module() // Sanity check module exists if mid >= uint(len(p.modules)) { diff --git a/pkg/trace/trace.go b/pkg/trace/trace.go index 122c39bd..b5ef3fb1 100644 --- a/pkg/trace/trace.go +++ b/pkg/trace/trace.go @@ -21,7 +21,7 @@ type ColumnSet interface { // Add a new column to this column set. Add(ctx Context, name string, data util.FrArray, padding *fr.Element) uint // Get the ith module in this set. - Get(uint) Column + Get(uint) *Column // Determine index of given column, or return false if this fails. IndexOf(module uint, column string) (uint, bool) // Returns the number of items in this array. @@ -33,28 +33,6 @@ type ColumnSet interface { Trim(uint) } -// Column describes an individual column of data within a trace table. -type Column interface { - // Get the value at a given row in this column. If the row is - // out-of-bounds, then the column's padding value is returned instead. - // Thus, this function always succeeds. - Get(row int) *fr.Element - // Access the underlying data array for this column - Data() util.FrArray - // Return the height (i.e. number of rows) of this column. - Height() uint - // Returns the evaluation context for this column. That identifies the - // enclosing module, and then length multiplier (which must be a factor of - // the height). For example, if the multiplier is 2 then the height must - // always be a multiple of 2, etc. This affects padding also, as we must - // pad to this multiplier, etc. - Context() Context - // Get the name of this column - Name() string - // Return the value to use for padding this column. - Padding() *fr.Element -} - // ModuleSet provides an interface to the declared moules within this trace. 
type ModuleSet interface { // Register a new module with a given name and height, returning the module @@ -72,6 +50,83 @@ type ModuleSet interface { Swap(uint, uint) } +// ---------------------------------------------------------------------------- + +// Column describes an individual column of data within a trace table. +type Column struct { + // Evaluation context of this column + context Context + // Holds the name of this column + name string + // Holds the raw data making up this column + data util.FrArray + // Value to be used when padding this column + padding *fr.Element +} + +// NewColumn constructs a Column with the given name, data and padding. +func NewColumn(context Context, name string, data util.FrArray, + padding *fr.Element) *Column { + // Sanity check data length + if data.Len()%context.LengthMultiplier() != 0 { + panic("data length has incorrect multiplier") + } + // Done + return &Column{context, name, data, padding} +} + +// Context returns the evaluation context this column provides. +func (p *Column) Context() Context { + return p.context +} + +// Name returns the name of the given column. +func (p *Column) Name() string { + return p.name +} + +// Height determines the height of this column. +func (p *Column) Height() uint { + return p.data.Len() +} + +// Padding returns the value which will be used for padding this column. +func (p *Column) Padding() *fr.Element { + return p.padding +} + +// Data provides access to the underlying data of this column +func (p *Column) Data() util.FrArray { + return p.data +} + +// Get the value at a given row in this column. If the row is +// out-of-bounds, then the column's padding value is returned instead. +// Thus, this function always succeeds. 
+func (p *Column) Get(row int) *fr.Element { + if row < 0 || uint(row) >= p.data.Len() { + // out-of-bounds access + return p.padding + } + // in-bounds access + return p.data.Get(uint(row)) +} + +func (p *Column) pad(n uint) { + // Apply the length multiplier + n = n * p.context.LengthMultiplier() + // Pad front of array + p.data = p.data.PadFront(n, p.padding) +} + +// Reseat updates the module index of this column (e.g. as a result of a +// realignment). +func (p *Column) reseat(mid uint) { + p.context = NewContext(mid, p.context.LengthMultiplier()) +} + +// ---------------------------------------------------------------------------- + // Module describes an individual module within the trace table, and // permits actions on the columns of this module as a whole. type Module struct {