cairo-wideint.c
/* cairo - a vector graphics library with display and print output
 *
 * Copyright © 2004 Keith Packard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * The original code as contributed to the cairo library under
 * the dual license MPL+LGPL. We used the LGPL relicensing clause to
 * get a GPL version of this code which now lives here. This header is
 * unmodified other than the licensing clause.
 *
 * The Original Code is the cairo graphics library.
 *
 * The Initial Developer of the Original Code is Keith Packard
 *
 * Contributor(s):
 *      Keith R. Packard <keithp@keithp.com>
 *
 * Code changes for ns-3 from upstream are marked with `//PDB'
 */

// NOLINTBEGIN
// clang-format off

#include "cairo-wideint-private.h"

#include <climits>

#if HAVE_UINT64_T

const char * cairo_impl64 = "uint64_t";

#define _cairo_uint32s_to_uint64(h,l) ((uint64_t) (h) << 32 | (l))

cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
    cairo_uquorem64_t qr;

    qr.quo = num / den;
    qr.rem = num % den;
    return qr;
}

#else

const char * cairo_impl64 = "uint32_t";

cairo_uint64_t
_cairo_uint32_to_uint64 (uint32_t i)
{
    cairo_uint64_t q;

    q.lo = i;
    q.hi = 0;
    return q;
}

cairo_int64_t
_cairo_int32_to_int64 (int32_t i)
{
    cairo_int64_t q;

    q.lo = i;
    q.hi = i < 0 ? -1 : 0;
    return q;
}

static cairo_uint64_t
_cairo_uint32s_to_uint64 (uint32_t h, uint32_t l)
{
    cairo_uint64_t q;

    q.lo = l;
    q.hi = h;
    return q;
}

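/* The two-word add below detects a carry out of the low word purely with
 * unsigned arithmetic: after the wrap-around addition, s.lo < a.lo holds
 * exactly when a.lo + b.lo overflowed 32 bits, so the high word is then
 * incremented by one.  Subtraction uses the mirrored test for a borrow. */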
cairo_uint64_t
_cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t s;

    s.hi = a.hi + b.hi;
    s.lo = a.lo + b.lo;
    if (s.lo < a.lo)
        s.hi++;
    return s;
}

cairo_uint64_t
_cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t s;

    s.hi = a.hi - b.hi;
    s.lo = a.lo - b.lo;
    if (s.lo > a.lo)
        s.hi--;
    return s;
}

#define uint32_lo(i) ((i) & 0xffff)
#define uint32_hi(i) ((i) >> 16)
#define uint32_carry16 ((1) << 16)

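/* The fallback 32x32->64 multiply splits each operand into 16-bit halves and
 * forms the four partial products r0..r3 (schoolbook multiplication in base
 * 2^16).  r1 accumulates the high half of r0 plus the other cross term r2;
 * if that addition wraps, a carry of 2^16 is folded into r3 before the
 * pieces are recombined into the 64-bit result. */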
cairo_uint64_t
_cairo_uint32x32_64_mul (uint32_t a, uint32_t b)
{
    cairo_uint64_t s;

    uint16_t ah, al, bh, bl;
    uint32_t r0, r1, r2, r3;

    al = uint32_lo (a);
    ah = uint32_hi (a);
    bl = uint32_lo (b);
    bh = uint32_hi (b);

    r0 = (uint32_t) al * bl;
    r1 = (uint32_t) al * bh;
    r2 = (uint32_t) ah * bl;
    r3 = (uint32_t) ah * bh;

    r1 += uint32_hi(r0);    /* no carry possible */
    r1 += r2;               /* but this can carry */
    if (r1 < r2)            /* check */
        r3 += uint32_carry16;

    s.hi = r3 + uint32_hi(r1);
    s.lo = (uint32_lo (r1) << 16) + uint32_lo (r0);
    return s;
}

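/* Signed 32x32->64 multiply: reuse the unsigned routine on the two's-
 * complement bit patterns, then correct the high word by subtracting b when
 * a is negative and a when b is negative (the two's-complement bias terms of
 * the unsigned product, taken modulo 2^64). */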
cairo_int64_t
_cairo_int32x32_64_mul (int32_t a, int32_t b)
{
    cairo_int64_t s;
    s = _cairo_uint32x32_64_mul ((uint32_t) a, (uint32_t) b);
    if (a < 0)
        s.hi -= b;
    if (b < 0)
        s.hi -= a;
    return s;
}

cairo_uint64_t
_cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t s;

    s = _cairo_uint32x32_64_mul (a.lo, b.lo);
    s.hi += a.lo * b.hi + a.hi * b.lo;
    return s;
}

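/* The shift helpers split the shift amount in two: a whole-word move when
 * shift >= 32, then a bitwise shift of the remaining 0-31 bits.  The
 * `if (shift)` guard skips the second step when nothing remains, which also
 * avoids shifting a 32-bit word by 32 (undefined behaviour in C). */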
cairo_uint64_t
_cairo_uint64_lsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
        a.hi = a.lo;
        a.lo = 0;
        shift -= 32;
    }
    if (shift)
    {
        a.hi = a.hi << shift | a.lo >> (32 - shift);
        a.lo = a.lo << shift;
    }
    return a;
}

cairo_uint64_t
_cairo_uint64_rsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
        a.lo = a.hi;
        a.hi = 0;
        shift -= 32;
    }
    if (shift)
    {
        a.lo = a.lo >> shift | a.hi << (32 - shift);
        a.hi = a.hi >> shift;
    }
    return a;
}

#define _cairo_uint32_rsa(a,n) ((uint32_t) (((int32_t) (a)) >> (n)))

cairo_int64_t
_cairo_uint64_rsa (cairo_int64_t a, int shift)
{
    if (shift >= 32)
    {
        a.lo = a.hi;
        a.hi = _cairo_uint32_rsa (a.hi, 31);
        shift -= 32;
    }
    if (shift)
    {
        a.lo = a.lo >> shift | a.hi << (32 - shift);
        a.hi = _cairo_uint32_rsa (a.hi, shift);
    }
    return a;
}

int
_cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b)
{
    return (a.hi < b.hi ||
            (a.hi == b.hi && a.lo < b.lo));
}

int
_cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b)
{
    return a.hi == b.hi && a.lo == b.lo;
}

int
_cairo_int64_lt (cairo_int64_t a, cairo_int64_t b)
{
    if (_cairo_int64_negative (a) && !_cairo_int64_negative (b))
        return 1;
    if (!_cairo_int64_negative (a) && _cairo_int64_negative (b))
        return 0;
    return _cairo_uint64_lt (a, b);
}

cairo_uint64_t
_cairo_uint64_not (cairo_uint64_t a)
{
    a.lo = ~a.lo;
    a.hi = ~a.hi;
    return a;
}

cairo_uint64_t
_cairo_uint64_negate (cairo_uint64_t a)
{
    a.lo = ~a.lo;
    a.hi = ~a.hi;
    if (++a.lo == 0)
        ++a.hi;
    return a;
}

/*
 * Simple bit-at-a-time divide.
 */
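/* This is classic restoring division: the divisor and a quotient bit mask
 * are shifted left until the divisor is about to exceed the dividend (or its
 * top bit is set), then quotient bits are produced most-significant first by
 * repeated compare/subtract while both shift back right.  At most 64
 * iterations; no hardware 64-bit divide is required. */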
cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
    cairo_uquorem64_t qr;
    cairo_uint64_t bit;
    cairo_uint64_t quo;

    bit = _cairo_uint32_to_uint64 (1);

    /* normalize to make den >= num, but not overflow */
    while (_cairo_uint64_lt (den, num) && (den.hi & 0x80000000) == 0)
    {
        bit = _cairo_uint64_lsl (bit, 1);
        den = _cairo_uint64_lsl (den, 1);
    }
    quo = _cairo_uint32_to_uint64 (0);

    /* generate quotient, one bit at a time */
    while (bit.hi | bit.lo)
    {
        if (_cairo_uint64_le (den, num))
        {
            num = _cairo_uint64_sub (num, den);
            quo = _cairo_uint64_add (quo, bit);
        }
        bit = _cairo_uint64_rsl (bit, 1);
        den = _cairo_uint64_rsl (den, 1);
    }
    qr.quo = quo;
    qr.rem = num;
    return qr;
}

#endif /* !HAVE_UINT64_T */

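/* The signed divrem routines divide the magnitudes with the unsigned routine
 * and then restore the signs, matching C's truncated division: the remainder
 * takes the sign of the numerator, and the quotient is negated exactly when
 * the operand signs differ. */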
cairo_quorem64_t
_cairo_int64_divrem (cairo_int64_t num, cairo_int64_t den)
{
    int num_neg = _cairo_int64_negative (num);
    int den_neg = _cairo_int64_negative (den);
    cairo_uquorem64_t uqr;
    cairo_quorem64_t qr;

    if (num_neg)
        num = _cairo_int64_negate (num);
    if (den_neg)
        den = _cairo_int64_negate (den);
    uqr = _cairo_uint64_divrem (num, den);
    if (num_neg)
        qr.rem = _cairo_int64_negate ((cairo_int64_t)uqr.rem); //PDB cast
    else
        qr.rem = uqr.rem;
    if (num_neg != den_neg)
        qr.quo = (cairo_int64_t) _cairo_int64_negate ((cairo_int64_t)uqr.quo); //PDB cast
    else
        qr.quo = (cairo_int64_t) uqr.quo;
    return qr;
}

#if HAVE_UINT128_T

const char * cairo_impl128 = "uint128_t";

cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
    cairo_uquorem128_t qr;

    qr.quo = num / den;
    qr.rem = num % den;
    return qr;
}

#else

const char * cairo_impl128 = "cairo_uint64_t";

cairo_uint128_t
_cairo_uint32_to_uint128 (uint32_t i)
{
    cairo_uint128_t q;

    q.lo = _cairo_uint32_to_uint64 (i);
    q.hi = _cairo_uint32_to_uint64 (0);
    return q;
}

cairo_int128_t
_cairo_int32_to_int128 (int32_t i)
{
    cairo_int128_t q;

    q.lo = _cairo_int32_to_int64 (i);
    q.hi = _cairo_int32_to_int64 (i < 0 ? -1 : 0);
    return q;
}

cairo_uint128_t
_cairo_uint64_to_uint128 (cairo_uint64_t i)
{
    cairo_uint128_t q;

    q.lo = i;
    q.hi = _cairo_uint32_to_uint64 (0);
    return q;
}

cairo_int128_t
_cairo_int64_to_int128 (cairo_int64_t i)
{
    cairo_int128_t q;

    q.lo = i;
    q.hi = _cairo_int32_to_int64 (_cairo_int64_negative (i) ? -1 : 0);
    return q;
}

cairo_uint128_t
_cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s.hi = _cairo_uint64_add (a.hi, b.hi);
    s.lo = _cairo_uint64_add (a.lo, b.lo);
    if (_cairo_uint64_lt (s.lo, a.lo))
        s.hi = _cairo_uint64_add (s.hi, _cairo_uint32_to_uint64 (1));
    return s;
}

cairo_uint128_t
_cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s.hi = _cairo_uint64_sub (a.hi, b.hi);
    s.lo = _cairo_uint64_sub (a.lo, b.lo);
    if (_cairo_uint64_gt (s.lo, a.lo))
        s.hi = _cairo_uint64_sub (s.hi, _cairo_uint32_to_uint64 (1));
    return s;
}

#if HAVE_UINT64_T

#define uint64_lo32(i) ((i) & 0xffffffff)
#define uint64_hi32(i) ((i) >> 32)
#define uint64_lo(i) ((i) & 0xffffffff)
#define uint64_hi(i) ((i) >> 32)
#define uint64_shift32(i) ((i) << 32)
#define uint64_carry32 (((uint64_t) 1) << 32)

#else

#define uint64_lo32(i) ((i).lo)
#define uint64_hi32(i) ((i).hi)

static cairo_uint64_t
uint64_lo (cairo_uint64_t i)
{
    cairo_uint64_t s;

    s.lo = i.lo;
    s.hi = 0;
    return s;
}

static cairo_uint64_t
uint64_hi (cairo_uint64_t i)
{
    cairo_uint64_t s;

    s.lo = i.hi;
    s.hi = 0;
    return s;
}

static cairo_uint64_t
uint64_shift32 (cairo_uint64_t i)
{
    cairo_uint64_t s;

    s.lo = 0;
    s.hi = i.lo;
    return s;
}

static const cairo_uint64_t uint64_carry32 = { 0, 1 };

#endif

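/* 64x64->128 multiplication is built from four 32x32->64 partial products,
 * mirroring the 16-bit-limb scheme used above for the 32x32->64 case: the
 * high half of r0 and the cross term r2 are accumulated into r1, and a carry
 * of 2^32 (uint64_carry32) is folded into r3 when that addition wraps. */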
cairo_uint128_t
_cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint128_t s;
    uint32_t ah, al, bh, bl;
    cairo_uint64_t r0, r1, r2, r3;

    al = uint64_lo32 (a);
    ah = uint64_hi32 (a);
    bl = uint64_lo32 (b);
    bh = uint64_hi32 (b);

    r0 = _cairo_uint32x32_64_mul (al, bl);
    r1 = _cairo_uint32x32_64_mul (al, bh);
    r2 = _cairo_uint32x32_64_mul (ah, bl);
    r3 = _cairo_uint32x32_64_mul (ah, bh);

    r1 = _cairo_uint64_add (r1, uint64_hi (r0));    /* no carry possible */
    r1 = _cairo_uint64_add (r1, r2);                /* but this can carry */
    if (_cairo_uint64_lt (r1, r2))                  /* check */
        r3 = _cairo_uint64_add (r3, uint64_carry32);

    s.hi = _cairo_uint64_add (r3, uint64_hi(r1));
    s.lo = _cairo_uint64_add (uint64_shift32 (r1),
                              uint64_lo (r0));
    return s;
}

cairo_int128_t
_cairo_int64x64_128_mul (cairo_int64_t a, cairo_int64_t b)
{
    cairo_int128_t s;
    s = _cairo_uint64x64_128_mul (_cairo_int64_to_uint64 (a),
                                  _cairo_int64_to_uint64 (b));
    if (_cairo_int64_negative (a))
        s.hi = _cairo_uint64_sub (s.hi,
                                  _cairo_int64_to_uint64 (b));
    if (_cairo_int64_negative (b))
        s.hi = _cairo_uint64_sub (s.hi,
                                  _cairo_int64_to_uint64 (a));
    return s;
}

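/* Full 128x128 multiplication keeps only the low 128 bits of the product
 * (wrapping multiply): the low words are multiplied out to 128 bits and the
 * cross terms a.lo*b.hi and a.hi*b.lo are added into the high half; the
 * a.hi*b.hi term would only affect bits >= 128 and is dropped. */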
cairo_uint128_t
_cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s = _cairo_uint64x64_128_mul (a.lo, b.lo);
    s.hi = _cairo_uint64_add (s.hi,
                              _cairo_uint64_mul (a.lo, b.hi));
    s.hi = _cairo_uint64_add (s.hi,
                              _cairo_uint64_mul (a.hi, b.lo));
    return s;
}

cairo_uint128_t
_cairo_uint128_lsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
        a.hi = a.lo;
        a.lo = _cairo_uint32_to_uint64 (0);
        shift -= 64;
    }
    if (shift)
    {
        a.hi = _cairo_uint64_add (_cairo_uint64_lsl (a.hi, shift),
                                  _cairo_uint64_rsl (a.lo, (64 - shift)));
        a.lo = _cairo_uint64_lsl (a.lo, shift);
    }
    return a;
}

cairo_uint128_t
_cairo_uint128_rsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
        a.lo = a.hi;
        a.hi = _cairo_uint32_to_uint64 (0);
        shift -= 64;
    }
    if (shift)
    {
        a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
                                  _cairo_uint64_lsl (a.hi, (64 - shift)));
        a.hi = _cairo_uint64_rsl (a.hi, shift);
    }
    return a;
}

cairo_uint128_t
_cairo_uint128_rsa (cairo_int128_t a, int shift)
{
    if (shift >= 64)
    {
        a.lo = a.hi;
        a.hi = _cairo_uint64_rsa (a.hi, 64-1);
        shift -= 64;
    }
    if (shift)
    {
        a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
                                  _cairo_uint64_lsl (a.hi, (64 - shift)));
        a.hi = _cairo_uint64_rsa (a.hi, shift);
    }
    return a;
}

int
_cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b)
{
    return (_cairo_uint64_lt (a.hi, b.hi) ||
            (_cairo_uint64_eq (a.hi, b.hi) &&
             _cairo_uint64_lt (a.lo, b.lo)));
}

int
_cairo_int128_lt (cairo_int128_t a, cairo_int128_t b)
{
    if (_cairo_int128_negative (a) && !_cairo_int128_negative (b))
        return 1;
    if (!_cairo_int128_negative (a) && _cairo_int128_negative (b))
        return 0;
    return _cairo_uint128_lt (a, b);
}

int
_cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
{
    return (_cairo_uint64_eq (a.hi, b.hi) &&
            _cairo_uint64_eq (a.lo, b.lo));
}

#if HAVE_UINT64_T
#define _cairo_msbset64(q) (q & ((uint64_t) 1 << 63))
#else
#define _cairo_msbset64(q) (q.hi & ((uint32_t) 1 << 31))
#endif

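/* 128-bit version of the bit-at-a-time restoring divide used for the 64-bit
 * fallback above; _cairo_msbset64 tests the top bit of the divisor's high
 * word so the normalization loop stops before the shifted divisor could
 * overflow. */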
cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
    cairo_uquorem128_t qr;
    cairo_uint128_t bit;
    cairo_uint128_t quo;

    bit = _cairo_uint32_to_uint128 (1);

    /* normalize to make den >= num, but not overflow */
    while (_cairo_uint128_lt (den, num) && !_cairo_msbset64(den.hi))
    {
        bit = _cairo_uint128_lsl (bit, 1);
        den = _cairo_uint128_lsl (den, 1);
    }
    quo = _cairo_uint32_to_uint128 (0);

    /* generate quotient, one bit at a time */
    while (_cairo_uint128_ne (bit, _cairo_uint32_to_uint128 (0)))
    {
        if (_cairo_uint128_le (den, num))
        {
            num = _cairo_uint128_sub (num, den);
            quo = _cairo_uint128_add (quo, bit);
        }
        bit = _cairo_uint128_rsl (bit, 1);
        den = _cairo_uint128_rsl (den, 1);
    }
    qr.quo = quo;
    qr.rem = num;
    return qr;
}

cairo_uint128_t
_cairo_uint128_negate (cairo_uint128_t a)
{
    a.lo = _cairo_uint64_not (a.lo);
    a.hi = _cairo_uint64_not (a.hi);
    return _cairo_uint128_add (a, _cairo_uint32_to_uint128 (1));
}

cairo_uint128_t
_cairo_uint128_not (cairo_uint128_t a)
{
    a.lo = _cairo_uint64_not (a.lo);
    a.hi = _cairo_uint64_not (a.hi);
    return a;
}

#endif /* !HAVE_UINT128_T */

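/* As with _cairo_int64_divrem, the 128-bit signed divrem divides the
 * magnitudes unsigned and then fixes the signs (truncated-division
 * semantics). */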
cairo_quorem128_t
_cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den)
{
    int num_neg = _cairo_int128_negative (num);
    int den_neg = _cairo_int128_negative (den);
    cairo_uquorem128_t uqr;
    cairo_quorem128_t qr;

    if (num_neg)
        num = _cairo_int128_negate (num);
    if (den_neg)
        den = _cairo_int128_negate (den);
    uqr = _cairo_uint128_divrem (num, den);
    if (num_neg)
        qr.rem = _cairo_int128_negate (uqr.rem);
    else
        qr.rem = uqr.rem;
    if (num_neg != den_neg)
        qr.quo = _cairo_int128_negate (uqr.quo);
    else
        qr.quo = uqr.quo;
    return qr;
}

/**
 * _cairo_uint_96by64_32x64_divrem:
 *
 * Compute a 32 bit quotient and 64 bit remainder of a 96 bit unsigned
 * dividend and 64 bit divisor.  If the quotient doesn't fit into 32
 * bits then the returned remainder is equal to the divisor, and the
 * quotient is the largest representable 64 bit integer.  It is an
 * error to call this function with the high 32 bits of `num' being
 * non-zero.
 */
cairo_uquorem64_t
_cairo_uint_96by64_32x64_divrem (cairo_uint128_t num,
                                 cairo_uint64_t den)
{
    cairo_uquorem64_t result;
    cairo_uint64_t B = _cairo_uint32s_to_uint64 (1, 0);

    /* These are the high 64 bits of the *96* bit numerator.  We're
     * going to represent the numerator as xB + y, where x is a 64,
     * and y is a 32 bit number. */
    cairo_uint64_t x = _cairo_uint128_to_uint64 (_cairo_uint128_rsl (num, 32));

    /* Initialise the result to indicate overflow. */
    result.quo = _cairo_uint32s_to_uint64 (UINT_MAX, UINT_MAX); //PDB cast
    result.rem = den;

    /* Don't bother if the quotient is going to overflow. */
    if (_cairo_uint64_ge (x, den)) {
        return /* overflow */ result;
    }

    if (_cairo_uint64_lt (x, B)) {
        /* When the final quotient is known to fit in 32 bits, then
         * num < 2^64 if and only if den < 2^32. */
        return _cairo_uint64_divrem (_cairo_uint128_to_uint64 (num), den);
    }
    else {
        /* Denominator is >= 2^32. the numerator is >= 2^64, and the
         * division won't overflow: need two divrems.  Write the
         * numerator and denominator as
         *
         *      num = xB + y            x : 64 bits, y : 32 bits
         *      den = uB + v            u, v : 32 bits
         */
        uint32_t y = _cairo_uint128_to_uint32 (num);
        uint32_t u = uint64_hi32 (den);
        uint32_t v = _cairo_uint64_to_uint32 (den);

        /* Compute a lower bound approximate quotient of num/den
         * from x/(u+1).  Then we have
         *
         * x = q(u+1) + r    ; q : 32 bits, r <= u : 32 bits.
         *
         * xB + y = q(u+1)B + (rB+y)
         *        = q(uB + B + v - v) + (rB+y)
         *        = q(uB + v) + qB - qv + (rB+y)
         *        = q(uB + v) + q(B-v) + (rB+y)
         *
         * The true quotient of num/den then is q plus the
         * contribution of q(B-v) + (rB+y).  The main contribution
         * comes from the term q(B-v), with the term (rB+y) only
         * contributing at most one part.
         *
         * The term q(B-v) must fit into 64 bits, since q fits into 32
         * bits on account of being a lower bound to the true
         * quotient, and as B-v <= 2^32, we may safely use a single
         * 64/64 bit division to find its contribution. */

        cairo_uquorem64_t quorem;
        cairo_uint64_t remainder; /* will contain final remainder */
        uint32_t quotient;        /* will contain final quotient. */
        uint32_t q;
        uint32_t r;

750  * u+1. Watch out for overflow of u+1. */
751  if (u+1) {
753  q = _cairo_uint64_to_uint32 (quorem.quo);
754  r = _cairo_uint64_to_uint32 (quorem.rem);
755  }
756  else {
757  q = uint64_hi32 (x);
759  }
        quotient = q;

        /* Add the main term's contribution to quotient.  Note B-v =
         * -v as an uint32 (unless v = 0) */
        if (v)
            quorem = _cairo_uint64_divrem (_cairo_uint32x32_64_mul (q, -(int32_t)v), den); //PDB cast
        else
            quorem = _cairo_uint64_divrem (_cairo_uint32s_to_uint64 (q, 0), den);
        quotient += _cairo_uint64_to_uint32 (quorem.quo);

        /* Add the contribution of the subterm and start computing the
         * true remainder. */
        remainder = _cairo_uint32s_to_uint64 (r, y);
        if (_cairo_uint64_ge (remainder, den)) {
            remainder = _cairo_uint64_sub (remainder, den);
            quotient++;
        }

        /* Add the contribution of the main term's remainder. The
         * funky test here checks that remainder + main_rem >= den,
         * taking into account overflow of the addition. */
        remainder = _cairo_uint64_add (remainder, quorem.rem);
        if (_cairo_uint64_ge (remainder, den) ||
            _cairo_uint64_lt (remainder, quorem.rem))
        {
            remainder = _cairo_uint64_sub (remainder, den);
            quotient++;
        }

        result.quo = _cairo_uint32_to_uint64 (quotient);
        result.rem = remainder;
    }
    return result;
}

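/* Signed wrapper around the routine above: it divides magnitudes, bails out
 * with a saturated quotient when the unsigned routine signals overflow
 * (remainder equal to the divisor), and otherwise restores the signs as in
 * _cairo_int64_divrem. */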
cairo_quorem64_t
_cairo_int_96by64_32x64_divrem (cairo_int128_t num, cairo_int64_t den)
{
    int num_neg = _cairo_int128_negative (num);
    int den_neg = _cairo_int64_negative (den);
    cairo_uint64_t nonneg_den;
    cairo_uquorem64_t uqr;
    cairo_quorem64_t qr;

    if (num_neg)
        num = _cairo_int128_negate (num);
    if (den_neg)
        nonneg_den = _cairo_int64_negate (den);
    else
        nonneg_den = den;

    uqr = _cairo_uint_96by64_32x64_divrem (num, nonneg_den);
    if (_cairo_uint64_eq (uqr.rem, _cairo_int64_to_uint64 (nonneg_den))) {
        /* bail on overflow. */
        qr.quo = _cairo_uint32s_to_uint64 (0x7FFFFFFF, UINT_MAX); //PDB cast
        qr.rem = den;
        return qr;
    }

    if (num_neg)
        qr.rem = _cairo_int64_negate ((cairo_int64_t)uqr.rem); //PDB cast
    else
        qr.rem = uqr.rem;
    if (num_neg != den_neg)
        qr.quo = _cairo_int64_negate ((cairo_int64_t)uqr.quo); //PDB cast
    else
        qr.quo = uqr.quo;
    return qr;
}

// clang-format on
// NOLINTEND