-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathu256.go
291 lines (263 loc) · 8.62 KB
/
u256.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
// Ported from https://github.com/holiman/uint256
// This package provides a 256-bit unsigned integer type, Uint256, and associated functions.
package uint256
import (
"errors"
"math/bits"
)
const (
	// MaxUint64 is the largest value representable by a uint64 (2^64 - 1).
	MaxUint64 = 1<<64 - 1
	// uintSize is the bit width of the builtin uint type: 32 or 64,
	// derived from the sign-bit position of ^uint(0).
	uintSize = 32 << (^uint(0) >> 63)
)
// Uint is represented as an array of 4 uint64, in little-endian order,
// so that Uint[3] is the most significant, and Uint[0] is the least significant
type Uint struct {
	arr [4]uint64 // limbs: arr[0] holds bits 0..63, arr[3] holds bits 192..255
}
// NewUint returns a new Uint whose value is val.
func NewUint(val uint64) *Uint {
	return &Uint{arr: [4]uint64{val, 0, 0, 0}}
}
// Zero returns a new Uint initialized to zero.
func Zero() *Uint {
	return &Uint{}
}
// One returns a new Uint initialized to one.
func One() *Uint {
	return &Uint{arr: [4]uint64{1, 0, 0, 0}}
}
// SetAllOne sets every bit of z to 1 (i.e. z = 2^256 - 1) and returns z.
func (z *Uint) SetAllOne() *Uint {
	for i := range z.arr {
		z.arr[i] = MaxUint64
	}
	return z
}
// Set sets z to x and returns z.
func (z *Uint) Set(x *Uint) *Uint {
	z.arr = x.arr
	return z
}
// SetOne sets z to 1 and returns z.
func (z *Uint) SetOne() *Uint {
	z.arr = [4]uint64{1, 0, 0, 0}
	return z
}
// twoPow256Sub1 is the decimal representation of 2^256 - 1, the largest
// value a Uint can hold (78 decimal digits).
const twoPow256Sub1 = "115792089237316195423570985008687907853269984665640564039457584007913129639935"
// SetFromDecimal sets z from the given string, interpreted as a decimal number.
// OBS! This method is _not_ strictly identical to the (*big.Uint).SetString(..., 10) method.
// Notable differences:
// - This method does not accept underscore input, e.g. "100_000",
// - This method does not accept negative zero as valid, e.g "-0",
// - (this method does not accept any negative input as valid))
func (z *Uint) SetFromDecimal(s string) (err error) {
	// Strip a single optional leading '+'.
	if len(s) > 0 && s[0] == '+' {
		s = s[1:]
	}
	// Strip leading zeroes, always keeping at least one character
	// so that "000" reduces to "0" rather than "".
	for len(s) > 1 && s[0] == '0' {
		s = s[1:]
	}
	switch {
	case len(s) < len(twoPow256Sub1):
		// Strictly fewer digits than 2^256-1: always in range.
		return z.fromDecimal(s)
	case len(s) == len(twoPow256Sub1):
		// Same digit count: a lexicographic compare decides the range,
		// since both strings have no leading zeroes.
		if s > twoPow256Sub1 {
			return ErrBig256Range
		}
		return z.fromDecimal(s)
	default:
		return ErrBig256Range
	}
}
// FromDecimal is a convenience-constructor to create an Uint from a
// decimal (base 10) string. Numbers larger than 256 bits are not accepted.
func FromDecimal(decimal string) (*Uint, error) {
	z := new(Uint)
	if err := z.SetFromDecimal(decimal); err != nil {
		return nil, err
	}
	return z, nil
}
// MustFromDecimal is a convenience-constructor to create an Uint from a
// decimal (base 10) string.
// Returns a new Uint and panics if any error occurred.
func MustFromDecimal(decimal string) *Uint {
	z, err := FromDecimal(decimal)
	if err != nil {
		panic(err)
	}
	return z
}
// multipliers holds the values that are needed for fromDecimal:
// entry i is 10^(19*i), the weight of the i-th 19-digit chunk
// (least significant chunk first).
var multipliers = [5]*Uint{
	nil, // represents first round, no multiplication needed
	{[4]uint64{10000000000000000000, 0, 0, 0}},                                     // 10 ^ 19
	{[4]uint64{687399551400673280, 5421010862427522170, 0, 0}},                     // 10 ^ 38
	{[4]uint64{5332261958806667264, 17004971331911604867, 2938735877055718769, 0}}, // 10 ^ 57
	{[4]uint64{0, 8607968719199866880, 532749306367912313, 1593091911132452277}},   // 10 ^ 76
}
// fromDecimal is a helper function to only ever be called via SetFromDecimal
// this function takes a string and chunks it up, calling ParseUint on it up to 5 times
// these chunks are then multiplied by the proper power of 10, then added together.
func (z *Uint) fromDecimal(bs string) error {
	// first clear the input
	z.Clear()
	// the maximum value of uint64 is 18446744073709551615, which is 20 characters
	// one less means that a string of 19 9's is always within the uint64 limit
	var (
		num       uint64
		err       error
		remaining = len(bs)
	)
	if remaining == 0 {
		// NOTE(review): errors.New("EOF") is a fresh error value, not io.EOF;
		// callers matching with errors.Is(err, io.EOF) will not see it.
		return errors.New("EOF")
	}
	// We proceed in steps of 19 characters (digits), from least significant to most significant.
	// This means that the first (up to) 19 characters do not need to be multiplied.
	// In the second iteration, our slice of 19 characters needs to be multiplied
	// by a factor of 10^19. Et cetera.
	for i, mult := range multipliers {
		if remaining <= 0 {
			return nil // Done
		} else if remaining > 19 {
			// Parse the least significant 19 digits still remaining.
			num, err = parseUint(bs[remaining-19:remaining], 10, 64)
		} else {
			// Final round: whatever is left fits in one uint64.
			num, err = parseUint(bs, 10, 64)
		}
		if err != nil {
			return err
		}
		// add that number to our running total
		if i == 0 {
			// First (least significant) chunk: mult is nil, no scaling needed.
			z.SetUint64(num)
		} else {
			base := NewUint(num)
			z.Add(z, base.Mul(base, mult))
		}
		// Chop off another 19 characters
		if remaining > 19 {
			bs = bs[0 : remaining-19]
		}
		remaining -= 19
	}
	return nil
}
// Byte sets z to the value of the byte at position n,
// with 'z' considered as a big-endian 32-byte integer.
// If 'n' >= 32, z is set to 0 (the original comment said "> 32",
// but the code below zeroes for any n outside 0..31).
// Example: z = '5', n=31 => 5
func (z *Uint) Byte(n *Uint) *Uint {
	// in z, z.arr[0] is the least significant
	if number, overflow := n.Uint64WithOverflow(); !overflow {
		if number < 32 {
			// Pick the 64-bit word containing byte n, counting bytes
			// big-endian: byte 0 lives in arr[3], byte 31 in arr[0].
			number := z.arr[4-1-number/8]
			offset := (n.arr[0] & 0x7) << 3 // 8*(n.d % 8)
			// Mask out the requested byte within the word and shift it
			// down to the low 8 bits.
			z.arr[0] = (number & (0xff00000000000000 >> offset)) >> (56 - offset)
			z.arr[3], z.arr[2], z.arr[1] = 0, 0, 0
			return z
		}
	}
	return z.Clear()
}
// BitLen returns the number of bits required to represent z (0 for z == 0).
func (z *Uint) BitLen() int {
	// Scan limbs from most significant to least; the first non-zero
	// limb determines the length.
	for i := 3; i > 0; i-- {
		if w := z.arr[i]; w != 0 {
			return i*64 + bits.Len64(w)
		}
	}
	return bits.Len64(z.arr[0])
}
// ByteLen returns the number of bytes required to represent z.
func (z *Uint) ByteLen() int {
	// Round the bit length up to a whole number of bytes.
	return (z.BitLen() + 7) >> 3
}
// Clear sets z to 0 and returns z.
func (z *Uint) Clear() *Uint {
	z.arr = [4]uint64{}
	return z
}
const (
	// hextable = "0123456789abcdef"
	// bintable maps an ASCII byte to its hex-digit value (0-15);
	// every byte that is not a hex digit maps to badNibble (0xff).
	bintable  = "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x01\x02\x03\x04\x05\x06\a\b\t\xff\xff\xff\xff\xff\xff\xff\n\v\f\r\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\n\v\f\r\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
	// badNibble marks a byte that is not a valid hex digit.
	badNibble = 0xff
)
// SetFromHex sets z from the given string, interpreted as a hexadecimal number.
// OBS! This method is _not_ strictly identical to the (*big.Int).SetString(..., 16) method.
// Notable differences:
// - This method _require_ "0x" or "0X" prefix.
// - This method does not accept zero-prefixed hex, e.g. "0x0001"
// - This method does not accept underscore input, e.g. "100_000",
// - This method does not accept negative zero as valid, e.g "-0x0",
// - (this method does not accept any negative input as valid)
// Returns ErrBig256Range when the input encodes more than 256 bits.
func (z *Uint) SetFromHex(hex string) error {
	// Thin wrapper; all validation and parsing lives in fromHex.
	return z.fromHex(hex)
}
// fromHex is the internal implementation of parsing a hex-string.
func (z *Uint) fromHex(hex string) error {
	if err := checkNumberS(hex); err != nil {
		return err
	}
	// "0x" plus 64 hex digits is the longest representable input.
	if len(hex) > 66 {
		return ErrBig256Range
	}
	z.Clear()
	// Consume 16 hex digits (one 64-bit limb) per iteration, walking
	// from the least significant end of the string toward position 2,
	// which is where the "0x" prefix ends.
	end := len(hex)
	for word := 0; word < 4; word++ {
		start := end - 16
		if start < 2 {
			start = 2
		}
		for pos := start; pos < end; pos++ {
			nib := bintable[hex[pos]]
			if nib == badNibble {
				return ErrSyntax
			}
			z.arr[word] = z.arr[word]<<4 | uint64(nib)
		}
		end = start
	}
	return nil
}
// FromHex is a convenience-constructor to create an Uint from
// a hexadecimal string. The string is required to be '0x'-prefixed
// Numbers larger than 256 bits are not accepted.
func FromHex(hex string) (*Uint, error) {
	z := new(Uint)
	if err := z.fromHex(hex); err != nil {
		return nil, err
	}
	return z, nil
}
// MustFromHex is a convenience-constructor to create an Uint from
// a hexadecimal string.
// Returns a new Uint and panics if any error occurred.
func MustFromHex(hex string) *Uint {
	z, err := FromHex(hex)
	if err != nil {
		panic(err)
	}
	return z
}
// Clone creates a new Uint identical to z.
func (z *Uint) Clone() *Uint {
	// A struct copy duplicates the whole limb array.
	x := *z
	return &x
}