@@ -25,7 +25,7 @@ import (
25
25
26
26
// The inlineWords capacity is set to accommodate any value that would fit in a
// 128-bit integer (i.e. values with an absolute value up to 2^128 - 1).
// bits.UintSize is the platform word size, so this evaluates to 2 words on
// 64-bit platforms and 4 words on 32-bit platforms.
const inlineWords = 128 / bits.UintSize
29
29
30
30
// BigInt is a wrapper around big.Int. It minimizes memory allocation by using
31
31
// an inline array to back the big.Int's variable-length "nat" slice when the
@@ -206,54 +206,70 @@ func (z *BigInt) updateInner(src *big.Int) {
206
206
}
207
207
}
208
208
209
- // innerAsUint returns the BigInt's current absolute value as a uint and a flag
210
- // indicating whether the value is negative. If the value is not stored inline
211
- // or if it can not fit in a uint, false is returned.
209
// wordsInUint64 is the number of big.Words needed to hold a 64-bit integer on
// this platform: 1 on 64-bit platforms, 2 on 32-bit platforms.
const wordsInUint64 = 64 / bits.UintSize

func init() {
	// Startup sanity check: the inline word array must be able to hold at
	// least one full uint64, otherwise the uint64-based fast paths below
	// would read/write out of bounds.
	if inlineWords < wordsInUint64 {
		panic("inline array must be at least 64 bits large")
	}
}
216
+
217
// innerAsUint64 returns the BigInt's current absolute value as a uint64 and a
// flag indicating whether the value is negative. If the value is not stored
// inline or if it can not fit in a uint64, false is returned.
//
// NOTE: this was carefully written to permit function inlining. Modify with
// care.
//gcassert:inline
func (z *BigInt) innerAsUint64() (val uint64, neg bool, ok bool) {
	if !z.isInline() {
		// The value is not stored inline.
		return 0, false, false
	}
	if bits.UintSize == 64 && inlineWords == 2 {
		// Manually unrolled loop for current inlineWords setting.
		if z._inline[1] != 0 {
			// The value can not fit in a uint64.
			return 0, false, false
		}
	} else {
		// Fallback for other values of inlineWords. Words at index
		// wordsInUint64 and above are beyond a uint64's capacity, so any
		// nonzero word there means overflow.
		for i := wordsInUint64; i < len(z._inline); i++ {
			if z._inline[i] != 0 {
				// The value can not fit in a uint64.
				return 0, false, false
			}
		}
	}
	val = uint64(z._inline[0])
	if bits.UintSize == 32 {
		// From big.low64. On 32-bit platforms a uint64 spans two words
		// (wordsInUint64 == 2), so the overflow scan above began at index 2
		// and _inline[1] is the high half of the value.
		val = uint64(z._inline[1])<<32 | val
	}
	neg = z._inner == negSentinel
	return val, neg, true
}
241
253
242
- // updateInnerFromUint updates the BigInt's current value with the provided
254
+ // updateInnerFromUint64 updates the BigInt's current value with the provided
243
255
// absolute value and sign.
244
256
//
245
257
// NOTE: this was carefully written to permit function inlining. Modify with
246
258
// care.
247
259
//gcassert:inline
248
- func (z * BigInt ) updateInnerFromUint (val uint , neg bool ) {
260
+ func (z * BigInt ) updateInnerFromUint64 (val uint64 , neg bool ) {
249
261
// Set the inline value, making sure to clear out all other words.
250
262
z ._inline [0 ] = big .Word (val )
251
- if inlineWords == 2 {
263
+ if bits .UintSize == 32 {
264
+ // From (big.nat).setUint64.
265
+ z ._inline [1 ] = big .Word (val >> 32 )
266
+ }
267
+ if bits .UintSize == 64 && inlineWords == 2 {
252
268
// Manually unrolled loop for current inlineWords setting.
253
269
z ._inline [1 ] = 0
254
270
} else {
255
271
// Fallback for other values of inlineWords.
256
- for i := 1 ; i < len (z ._inline ); i ++ {
272
+ for i := wordsInUint64 ; i < len (z ._inline ); i ++ {
257
273
z ._inline [i ] = 0
258
274
}
259
275
}
@@ -285,14 +301,14 @@ func (z *BigInt) Size() uintptr {
285
301
///////////////////////////////////////////////////////////////////////////////
286
302
287
303
//gcassert:inline
288
- func addInline (xVal , yVal uint , xNeg , yNeg bool ) (zVal uint , zNeg , ok bool ) {
304
+ func addInline (xVal , yVal uint64 , xNeg , yNeg bool ) (zVal uint64 , zNeg , ok bool ) {
289
305
if xNeg == yNeg {
290
- sum , carry := bits .Add (xVal , yVal , 0 )
306
+ sum , carry := bits .Add64 (xVal , yVal , 0 )
291
307
overflow := carry != 0
292
308
return sum , xNeg , ! overflow
293
309
}
294
310
295
- diff , borrow := bits .Sub (xVal , yVal , 0 )
311
+ diff , borrow := bits .Sub64 (xVal , yVal , 0 )
296
312
if borrow != 0 { // underflow
297
313
xNeg = ! xNeg
298
314
diff = yVal - xVal
@@ -304,15 +320,15 @@ func addInline(xVal, yVal uint, xNeg, yNeg bool) (zVal uint, zNeg, ok bool) {
304
320
}
305
321
306
322
// mulInline multiplies two inline (sign, magnitude) values. It reports the
// product's magnitude and sign, with ok set to false when the magnitude does
// not fit in a uint64.
//gcassert:inline
func mulInline(xVal, yVal uint64, xNeg, yNeg bool) (zVal uint64, zNeg, ok bool) {
	hi, lo := bits.Mul64(xVal, yVal)
	// The product is negative iff exactly one operand is negative, and it
	// overflowed 64 bits iff the high word of the full product is nonzero.
	return lo, xNeg != yNeg, hi == 0
}
313
329
314
330
//gcassert:inline
315
- func quoInline (xVal , yVal uint , xNeg , yNeg bool ) (quoVal uint , quoNeg , ok bool ) {
331
+ func quoInline (xVal , yVal uint64 , xNeg , yNeg bool ) (quoVal uint64 , quoNeg , ok bool ) {
316
332
if yVal == 0 { // divide by 0
317
333
return 0 , false , false
318
334
}
@@ -322,7 +338,7 @@ func quoInline(xVal, yVal uint, xNeg, yNeg bool) (quoVal uint, quoNeg, ok bool)
322
338
}
323
339
324
340
//gcassert:inline
325
- func remInline (xVal , yVal uint , xNeg , yNeg bool ) (remVal uint , remNeg , ok bool ) {
341
+ func remInline (xVal , yVal uint64 , xNeg , yNeg bool ) (remVal uint64 , remNeg , ok bool ) {
326
342
if yVal == 0 { // divide by 0
327
343
return 0 , false , false
328
344
}
@@ -350,10 +366,10 @@ func (z *BigInt) Abs(x *BigInt) *BigInt {
350
366
351
367
// Add calls (big.Int).Add.
352
368
func (z * BigInt ) Add (x , y * BigInt ) * BigInt {
353
- if xVal , xNeg , ok := x .innerAsUint (); ok {
354
- if yVal , yNeg , ok := y .innerAsUint (); ok {
369
+ if xVal , xNeg , ok := x .innerAsUint64 (); ok {
370
+ if yVal , yNeg , ok := y .innerAsUint64 (); ok {
355
371
if zVal , zNeg , ok := addInline (xVal , yVal , xNeg , yNeg ); ok {
356
- z .updateInnerFromUint (zVal , zNeg )
372
+ z .updateInnerFromUint64 (zVal , zNeg )
357
373
return z
358
374
}
359
375
}
@@ -389,13 +405,13 @@ func (z *BigInt) Append(buf []byte, base int) []byte {
389
405
// Fast-path that avoids innerOrNil, allowing inner to be inlined.
390
406
return append (buf , "<nil>" ... )
391
407
}
392
- if zVal , zNeg , ok := z .innerAsUint (); ok {
408
+ if zVal , zNeg , ok := z .innerAsUint64 (); ok {
393
409
// Check if the base is supported by strconv.AppendUint.
394
410
if base >= 2 && base <= 36 {
395
411
if zNeg {
396
412
buf = append (buf , '-' )
397
413
}
398
- return strconv .AppendUint (buf , uint64 ( zVal ) , base )
414
+ return strconv .AppendUint (buf , zVal , base )
399
415
}
400
416
}
401
417
var tmp1 big.Int
@@ -450,8 +466,8 @@ func (z *BigInt) Bytes() []byte {
450
466
451
467
// Cmp calls (big.Int).Cmp.
452
468
func (z * BigInt ) Cmp (y * BigInt ) (r int ) {
453
- if zVal , zNeg , ok := z .innerAsUint (); ok {
454
- if yVal , yNeg , ok := y .innerAsUint (); ok {
469
+ if zVal , zNeg , ok := z .innerAsUint64 (); ok {
470
+ if yVal , yNeg , ok := y .innerAsUint64 (); ok {
455
471
switch {
456
472
case zNeg == yNeg :
457
473
switch {
@@ -477,8 +493,8 @@ func (z *BigInt) Cmp(y *BigInt) (r int) {
477
493
478
494
// CmpAbs calls (big.Int).CmpAbs.
479
495
func (z * BigInt ) CmpAbs (y * BigInt ) (r int ) {
480
- if zVal , _ , ok := z .innerAsUint (); ok {
481
- if yVal , _ , ok := y .innerAsUint (); ok {
496
+ if zVal , _ , ok := z .innerAsUint64 (); ok {
497
+ if yVal , _ , ok := y .innerAsUint64 (); ok {
482
498
switch {
483
499
case zVal < yVal :
484
500
r = - 1
@@ -571,38 +587,32 @@ func (z *BigInt) GobDecode(buf []byte) error {
571
587
572
588
// Int64 calls (big.Int).Int64.
573
589
func (z * BigInt ) Int64 () int64 {
574
- if bits .UintSize == 64 {
575
- if zVal , zNeg , ok := z .innerAsUint (); ok {
576
- zi := int64 (zVal )
577
- if zNeg {
578
- zi = - zi
579
- }
580
- return zi
590
+ if zVal , zNeg , ok := z .innerAsUint64 (); ok {
591
+ zi := int64 (zVal )
592
+ if zNeg {
593
+ zi = - zi
581
594
}
595
+ return zi
582
596
}
583
597
var tmp1 big.Int
584
598
return z .inner (& tmp1 ).Int64 ()
585
599
}
586
600
587
601
// IsInt64 calls (big.Int).IsInt64.
588
602
func (z * BigInt ) IsInt64 () bool {
589
- if bits .UintSize == 64 {
590
- if zVal , zNeg , ok := z .innerAsUint (); ok {
591
- // From (big.Int).IsInt64.
592
- zi := int64 (zVal )
593
- return zi >= 0 || zNeg && zi == - zi
594
- }
603
+ if zVal , zNeg , ok := z .innerAsUint64 (); ok {
604
+ // From (big.Int).IsInt64.
605
+ zi := int64 (zVal )
606
+ return zi >= 0 || zNeg && zi == - zi
595
607
}
596
608
var tmp1 big.Int
597
609
return z .inner (& tmp1 ).IsInt64 ()
598
610
}
599
611
600
612
// IsUint64 calls (big.Int).IsUint64.
601
613
func (z * BigInt ) IsUint64 () bool {
602
- if bits .UintSize == 64 {
603
- if _ , zNeg , ok := z .innerAsUint (); ok {
604
- return ! zNeg
605
- }
614
+ if _ , zNeg , ok := z .innerAsUint64 (); ok {
615
+ return ! zNeg
606
616
}
607
617
var tmp1 big.Int
608
618
return z .inner (& tmp1 ).IsUint64 ()
@@ -664,10 +674,10 @@ func (z *BigInt) ModSqrt(x, p *BigInt) *BigInt {
664
674
665
675
// Mul calls (big.Int).Mul.
666
676
func (z * BigInt ) Mul (x , y * BigInt ) * BigInt {
667
- if xVal , xNeg , ok := x .innerAsUint (); ok {
668
- if yVal , yNeg , ok := y .innerAsUint (); ok {
677
+ if xVal , xNeg , ok := x .innerAsUint64 (); ok {
678
+ if yVal , yNeg , ok := y .innerAsUint64 (); ok {
669
679
if zVal , zNeg , ok := mulInline (xVal , yVal , xNeg , yNeg ); ok {
670
- z .updateInnerFromUint (zVal , zNeg )
680
+ z .updateInnerFromUint64 (zVal , zNeg )
671
681
return z
672
682
}
673
683
}
@@ -732,10 +742,10 @@ func (z *BigInt) ProbablyPrime(n int) bool {
732
742
733
743
// Quo calls (big.Int).Quo.
734
744
func (z * BigInt ) Quo (x , y * BigInt ) * BigInt {
735
- if xVal , xNeg , ok := x .innerAsUint (); ok {
736
- if yVal , yNeg , ok := y .innerAsUint (); ok {
745
+ if xVal , xNeg , ok := x .innerAsUint64 (); ok {
746
+ if yVal , yNeg , ok := y .innerAsUint64 (); ok {
737
747
if quoVal , quoNeg , ok := quoInline (xVal , yVal , xNeg , yNeg ); ok {
738
- z .updateInnerFromUint (quoVal , quoNeg )
748
+ z .updateInnerFromUint64 (quoVal , quoNeg )
739
749
return z
740
750
}
741
751
}
@@ -749,12 +759,12 @@ func (z *BigInt) Quo(x, y *BigInt) *BigInt {
749
759
750
760
// QuoRem calls (big.Int).QuoRem.
751
761
func (z * BigInt ) QuoRem (x , y , r * BigInt ) (* BigInt , * BigInt ) {
752
- if xVal , xNeg , ok := x .innerAsUint (); ok {
753
- if yVal , yNeg , ok := y .innerAsUint (); ok {
762
+ if xVal , xNeg , ok := x .innerAsUint64 (); ok {
763
+ if yVal , yNeg , ok := y .innerAsUint64 (); ok {
754
764
if quoVal , quoNeg , ok := quoInline (xVal , yVal , xNeg , yNeg ); ok {
755
765
if remVal , remNeg , ok := remInline (xVal , yVal , xNeg , yNeg ); ok {
756
- z .updateInnerFromUint (quoVal , quoNeg )
757
- r .updateInnerFromUint (remVal , remNeg )
766
+ z .updateInnerFromUint64 (quoVal , quoNeg )
767
+ r .updateInnerFromUint64 (remVal , remNeg )
758
768
return z , r
759
769
}
760
770
}
@@ -780,10 +790,10 @@ func (z *BigInt) Rand(rnd *rand.Rand, n *BigInt) *BigInt {
780
790
781
791
// Rem calls (big.Int).Rem.
782
792
func (z * BigInt ) Rem (x , y * BigInt ) * BigInt {
783
- if xVal , xNeg , ok := x .innerAsUint (); ok {
784
- if yVal , yNeg , ok := y .innerAsUint (); ok {
793
+ if xVal , xNeg , ok := x .innerAsUint64 (); ok {
794
+ if yVal , yNeg , ok := y .innerAsUint64 (); ok {
785
795
if remVal , remNeg , ok := remInline (xVal , yVal , xNeg , yNeg ); ok {
786
- z .updateInnerFromUint (remVal , remNeg )
796
+ z .updateInnerFromUint64 (remVal , remNeg )
787
797
return z
788
798
}
789
799
}
@@ -857,19 +867,12 @@ func (z *BigInt) SetBytes(buf []byte) *BigInt {
857
867
858
868
// SetInt64 calls (big.Int).SetInt64.
859
869
func (z * BigInt ) SetInt64 (x int64 ) * BigInt {
860
- if bits .UintSize == 64 {
861
- neg := false
862
- if x < 0 {
863
- neg = true
864
- x = - x
865
- }
866
- z .updateInnerFromUint (uint (x ), neg )
867
- return z
870
+ neg := false
871
+ if x < 0 {
872
+ neg = true
873
+ x = - x
868
874
}
869
- var tmp1 big.Int
870
- zi := z .inner (& tmp1 )
871
- zi .SetInt64 (x )
872
- z .updateInner (zi )
875
+ z .updateInnerFromUint64 (uint64 (x ), neg )
873
876
return z
874
877
}
875
878
@@ -886,14 +889,7 @@ func (z *BigInt) SetString(s string, base int) (*BigInt, bool) {
886
889
887
890
// SetUint64 calls (big.Int).SetUint64.
func (z *BigInt) SetUint64(x uint64) *BigInt {
	// A uint64 always fits in the inline representation (the inline array is
	// at least 64 bits wide, enforced by the init-time check), so the value
	// is stored directly with no big.Int round trip.
	z.updateInnerFromUint64(x, false)
	return z
}
899
895
@@ -931,10 +927,10 @@ func (z *BigInt) String() string {
931
927
932
928
// Sub calls (big.Int).Sub.
933
929
func (z * BigInt ) Sub (x , y * BigInt ) * BigInt {
934
- if xVal , xNeg , ok := x .innerAsUint (); ok {
935
- if yVal , yNeg , ok := y .innerAsUint (); ok {
930
+ if xVal , xNeg , ok := x .innerAsUint64 (); ok {
931
+ if yVal , yNeg , ok := y .innerAsUint64 (); ok {
936
932
if zVal , zNeg , ok := addInline (xVal , yVal , xNeg , ! yNeg ); ok {
937
- z .updateInnerFromUint (zVal , zNeg )
933
+ z .updateInnerFromUint64 (zVal , zNeg )
938
934
return z
939
935
}
940
936
}
@@ -964,10 +960,8 @@ func (z *BigInt) TrailingZeroBits() uint {
964
960
965
961
// Uint64 calls (big.Int).Uint64.
966
962
func (z * BigInt ) Uint64 () uint64 {
967
- if bits .UintSize == 64 {
968
- if zVal , _ , ok := z .innerAsUint (); ok {
969
- return uint64 (zVal )
970
- }
963
+ if zVal , _ , ok := z .innerAsUint64 (); ok {
964
+ return zVal
971
965
}
972
966
var tmp1 big.Int
973
967
return z .inner (& tmp1 ).Uint64 ()
0 commit comments