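This patch rewrites the multi-line __asm__ templates in glibc-2.1.3's stdlib/longlong.h: each assembler instruction becomes its own string literal terminated by "\n", and the compiler concatenates the adjacent literals into a single template, so no raw newline is left inside a string constant (a construct that newer GCC releases reject). As a rough illustration of the resulting style (not part of the patch; it assumes an x86 target and drops the USItype casts and the "%" commutative constraint markers of the real macros for brevity), here is a self-contained sketch:

#include <stdio.h>

/* Illustrative only: the post-patch style, with one "\n"-terminated string
 * literal per instruction; the adjacent literals are concatenated by the
 * compiler, and the trailing backslashes merely continue the #define. */
#define ADD_SSAAAA(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl %5,%1\n"   /* add low words, sets the carry flag */ \
           "adcl %3,%0"     /* add high words plus the carry      */ \
           : "=r" (sh), "=&r" (sl) \
           : "0" (ah), "g" (bh), "1" (al), "g" (bl))

int main(void)
{
    unsigned int sh, sl;
    /* (1:0xFFFFFFFF) + (0:1) should give (2:0) across the 64-bit pair. */
    ADD_SSAAAA(sh, sl, 1u, 0xFFFFFFFFu, 0u, 1u);
    printf("%u %u\n", sh, sl);
    return 0;
}

The "\n" separators are what keep each instruction on its own line in the generated assembler output; the change is purely mechanical and does not alter any instruction or operand constraint.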
--- glibc-2.1.3/stdlib/longlong.h.old 2004-03-05 14:49:14.000000000 -0800
+++ glibc-2.1.3/stdlib/longlong.h 2004-03-05 15:19:26.000000000 -0800
@@ -106,8 +106,8 @@

#if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- __asm__ ("add %1,%4,%5
- addc %0,%2,%3" \
+ __asm__ ("add %1,%4,%5\n" \
+ "addc %0,%2,%3" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "%r" ((USItype)(ah)), \
@@ -115,8 +115,8 @@
"%r" ((USItype)(al)), \
"rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- __asm__ ("sub %1,%4,%5
- subc %0,%2,%3" \
+ __asm__ ("sub %1,%4,%5\n" \
+ "subc %0,%2,%3" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "r" ((USItype)(ah)), \
@@ -173,8 +173,8 @@

#if defined (__arm__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- __asm__ ("adds %1, %4, %5
- adc %0, %2, %3" \
+ __asm__ ("adds %1, %4, %5\n" \
+ "adc %0, %2, %3" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "%r" ((USItype)(ah)), \
@@ -182,8 +182,8 @@
"%r" ((USItype)(al)), \
"rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- __asm__ ("subs %1, %4, %5
- sbc %0, %2, %3" \
+ __asm__ ("subs %1, %4, %5\n" \
+ "sbc %0, %2, %3" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "r" ((USItype)(ah)), \
@@ -192,19 +192,19 @@
"rI" ((USItype)(bl)))
#if 0
#define umul_ppmm(xh, xl, a, b) \
- __asm__ ("%@ Inlined umul_ppmm
- mov %|r0, %2, lsr #16
- mov %|r2, %3, lsr #16
- bic %|r1, %2, %|r0, lsl #16
- bic %|r2, %3, %|r2, lsl #16
- mul %1, %|r1, %|r2
- mul %|r2, %|r0, %|r2
- mul %|r1, %0, %|r1
- mul %0, %|r0, %0
- adds %|r1, %|r2, %|r1
- addcs %0, %0, #65536
- adds %1, %1, %|r1, lsl #16
- adc %0, %0, %|r1, lsr #16" \
+ __asm__ ("%@ Inlined umul_ppmm\n" \
+ "mov %|r0, %2, lsr #16\n" \
+ "mov %|r2, %3, lsr #16\n" \
+ "bic %|r1, %2, %|r0, lsl #16\n" \
+ "bic %|r2, %3, %|r2, lsl #16\n" \
+ "mul %1, %|r1, %|r2\n" \
+ "mul %|r2, %|r0, %|r2\n" \
+ "mul %|r1, %0, %|r1\n" \
+ "mul %0, %|r0, %0\n" \
+ "adds %|r1, %|r2, %|r1\n" \
+ "addcs %0, %0, #65536\n" \
+ "adds %1, %1, %|r1, lsl #16\n" \
+ "adc %0, %0, %|r1, lsr #16" \
: "=&r" ((USItype)(xh)), \
"=r" ((USItype)(xl)) \
: "r" ((USItype)(a)), \
@@ -245,8 +245,8 @@

#if defined (__gmicro__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- __asm__ ("add.w %5,%1
- addx %3,%0" \
+ __asm__ ("add.w %5,%1\n" \
+ "addx %3,%0" \
: "=g" ((USItype)(sh)), \
"=&g" ((USItype)(sl)) \
: "%0" ((USItype)(ah)), \
@@ -254,8 +254,8 @@
"%1" ((USItype)(al)), \
"g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- __asm__ ("sub.w %5,%1
- subx %3,%0" \
+ __asm__ ("sub.w %5,%1\n" \
+ "subx %3,%0" \
: "=g" ((USItype)(sh)), \
"=&g" ((USItype)(sl)) \
: "0" ((USItype)(ah)), \
@@ -284,8 +284,8 @@

#if defined (__hppa) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- __asm__ ("add %4,%5,%1
- addc %2,%3,%0" \
+ __asm__ ("add %4,%5,%1\n" \
+ "addc %2,%3,%0" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "%rM" ((USItype)(ah)), \
@@ -293,8 +293,8 @@
"%rM" ((USItype)(al)), \
"rM" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- __asm__ ("sub %4,%5,%1
- subb %2,%3,%0" \
+ __asm__ ("sub %4,%5,%1\n" \
+ "subb %2,%3,%0" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "rM" ((USItype)(ah)), \
@@ -332,22 +332,22 @@
do { \
USItype __tmp; \
__asm__ ( \
- "ldi 1,%0
- extru,= %1,15,16,%%r0 ; Bits 31..16 zero?
- extru,tr %1,15,16,%1 ; No. Shift down, skip add.
- ldo 16(%0),%0 ; Yes. Perform add.
- extru,= %1,23,8,%%r0 ; Bits 15..8 zero?
- extru,tr %1,23,8,%1 ; No. Shift down, skip add.
- ldo 8(%0),%0 ; Yes. Perform add.
- extru,= %1,27,4,%%r0 ; Bits 7..4 zero?
- extru,tr %1,27,4,%1 ; No. Shift down, skip add.
- ldo 4(%0),%0 ; Yes. Perform add.
- extru,= %1,29,2,%%r0 ; Bits 3..2 zero?
- extru,tr %1,29,2,%1 ; No. Shift down, skip add.
- ldo 2(%0),%0 ; Yes. Perform add.
- extru %1,30,1,%1 ; Extract bit 1.
- sub %0,%1,%0 ; Subtract it.
- " : "=r" (count), "=r" (__tmp) : "1" (x)); \
+ "ldi 1,%0\n" \
+ "extru,= %1,15,16,%%r0 ; Bits 31..16 zero?\n" \
+ "extru,tr %1,15,16,%1 ; No. Shift down, skip add.\n" \
+ "ldo 16(%0),%0 ; Yes. Perform add.\n" \
+ "extru,= %1,23,8,%%r0 ; Bits 15..8 zero?\n" \
+ "extru,tr %1,23,8,%1 ; No. Shift down, skip add.\n" \
+ "ldo 8(%0),%0 ; Yes. Perform add.\n" \
+ "extru,= %1,27,4,%%r0 ; Bits 7..4 zero?\n" \
+ "extru,tr %1,27,4,%1 ; No. Shift down, skip add.\n" \
+ "ldo 4(%0),%0 ; Yes. Perform add.\n" \
+ "extru,= %1,29,2,%%r0 ; Bits 3..2 zero?\n" \
+ "extru,tr %1,29,2,%1 ; No. Shift down, skip add.\n" \
+ "ldo 2(%0),%0 ; Yes. Perform add.\n" \
+ "extru %1,30,1,%1 ; Extract bit 1.\n" \
+ "sub %0,%1,%0 ; Subtract it.\n" \
+ : "=r" (count), "=r" (__tmp) : "1" (x)); \
} while (0)
#endif /* hppa */

@@ -394,8 +394,8 @@

#if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- __asm__ ("addl %5,%1
- adcl %3,%0" \
+ __asm__ ("addl %5,%1\n" \
+ "adcl %3,%0" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "%0" ((USItype)(ah)), \
@@ -403,8 +403,8 @@
"%1" ((USItype)(al)), \
"g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- __asm__ ("subl %5,%1
- sbbl %3,%0" \
+ __asm__ ("subl %5,%1\n" \
+ "sbbl %3,%0" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "0" ((USItype)(ah)), \
@@ -516,8 +516,8 @@

#if (defined (__mc68000__) || defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- __asm__ ("add%.l %5,%1
- addx%.l %3,%0" \
+ __asm__ ("add%.l %5,%1\n" \
+ "addx%.l %3,%0" \
: "=d" ((USItype)(sh)), \
"=&d" ((USItype)(sl)) \
: "%0" ((USItype)(ah)), \
@@ -525,8 +525,8 @@
"%1" ((USItype)(al)), \
"g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- __asm__ ("sub%.l %5,%1
- subx%.l %3,%0" \
+ __asm__ ("sub%.l %5,%1\n" \
+ "subx%.l %3,%0" \
: "=d" ((USItype)(sh)), \
"=&d" ((USItype)(sl)) \
: "0" ((USItype)(ah)), \
@@ -564,28 +564,28 @@
#else /* not mc68020 */
#define umul_ppmm(xh, xl, a, b) \
do { USItype __umul_tmp1, __umul_tmp2; \
- __asm__ ("| Inlined umul_ppmm
- move%.l %5,%3
- move%.l %2,%0
- move%.w %3,%1
- swap %3
- swap %0
- mulu %2,%1
- mulu %3,%0
- mulu %2,%3
- swap %2
- mulu %5,%2
- add%.l %3,%2
- jcc 1f
- add%.l %#0x10000,%0
-1: move%.l %2,%3
- clr%.w %2
- swap %2
- swap %3
- clr%.w %3
- add%.l %3,%1
- addx%.l %2,%0
- | End inlined umul_ppmm" \
+ __asm__ ("| Inlined umul_ppmm\n" \
+ "move%.l %5,%3\n" \
+ "move%.l %2,%0\n" \
+ "move%.w %3,%1\n" \
+ "swap %3\n" \
+ "swap %0\n" \
+ "mulu %2,%1\n" \
+ "mulu %3,%0\n" \
+ "mulu %2,%3\n" \
+ "swap %2\n" \
+ "mulu %5,%2\n" \
+ "add%.l %3,%2\n" \
+ "jcc 1f\n" \
+ "add%.l %#0x10000,%0\n" \
+"1: move%.l %2,%3\n" \
+ "clr%.w %2\n" \
+ "swap %2\n" \
+ "swap %3\n" \
+ "clr%.w %3\n" \
+ "add%.l %3,%1\n" \
+ "addx%.l %2,%0\n" \
+ "| End inlined umul_ppmm" \
: "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)), \
"=d" (__umul_tmp1), "=&d" (__umul_tmp2) \
: "%2" ((USItype)(a)), "d" ((USItype)(b))); \
@@ -597,8 +597,8 @@

#if defined (__m88000__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- __asm__ ("addu.co %1,%r4,%r5
- addu.ci %0,%r2,%r3" \
+ __asm__ ("addu.co %1,%r4,%r5\n" \
+ "addu.ci %0,%r2,%r3" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "%rJ" ((USItype)(ah)), \
@@ -606,8 +606,8 @@
"%rJ" ((USItype)(al)), \
"rJ" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- __asm__ ("subu.co %1,%r4,%r5
- subu.ci %0,%r2,%r3" \
+ __asm__ ("subu.co %1,%r4,%r5\n" \
+ "subu.ci %0,%r2,%r3" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "rJ" ((USItype)(ah)), \
@@ -665,9 +665,9 @@
"d" ((USItype)(v)))
#else
#define umul_ppmm(w1, w0, u, v) \
- __asm__ ("multu %2,%3
- mflo %0
- mfhi %1" \
+ __asm__ ("multu %2,%3\n" \
+ "mflo %0\n" \
+ "mfhi %1" \
: "=d" ((USItype)(w0)), \
"=d" ((USItype)(w1)) \
: "d" ((USItype)(u)), \
@@ -687,9 +687,9 @@
"d" ((UDItype)(v)))
#else
#define umul_ppmm(w1, w0, u, v) \
- __asm__ ("dmultu %2,%3
- mflo %0
- mfhi %1" \
+ __asm__ ("dmultu %2,%3\n" \
+ "mflo %0\n" \
+ "mfhi %1" \
: "=d" ((UDItype)(w0)), \
"=d" ((UDItype)(w1)) \
: "d" ((UDItype)(u)), \
@@ -857,8 +857,8 @@

#if defined (__pyr__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- __asm__ ("addw %5,%1
- addwc %3,%0" \
+ __asm__ ("addw %5,%1\n" \
+ "addwc %3,%0" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "%0" ((USItype)(ah)), \
@@ -866,8 +866,8 @@
"%1" ((USItype)(al)), \
"g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- __asm__ ("subw %5,%1
- subwb %3,%0" \
+ __asm__ ("subw %5,%1\n" \
+ "subwb %3,%0" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "0" ((USItype)(ah)), \
@@ -879,8 +879,8 @@
({union {UDItype __ll; \
struct {USItype __h, __l;} __i; \
} __xx; \
- __asm__ ("movw %1,%R0
- uemul %2,%0" \
+ __asm__ ("movw %1,%R0\n" \
+ "uemul %2,%0" \
: "=&r" (__xx.__ll) \
: "g" ((USItype) (u)), \
"g" ((USItype)(v))); \
@@ -889,8 +889,8 @@

#if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- __asm__ ("a %1,%5
- ae %0,%3" \
+ __asm__ ("a %1,%5\n" \
+ "ae %0,%3" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "%0" ((USItype)(ah)), \
@@ -898,8 +898,8 @@
"%1" ((USItype)(al)), \
"r" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- __asm__ ("s %1,%5
- se %0,%3" \
+ __asm__ ("s %1,%5\n" \
+ "se %0,%3" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "0" ((USItype)(ah)), \
@@ -910,26 +910,26 @@
do { \
USItype __m0 = (m0), __m1 = (m1); \
__asm__ ( \
- "s r2,r2
- mts r10,%2
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- m r2,%3
- cas %0,r2,r0
- mfs r10,%1" \
+ "s r2,r2\n" \
+ "mts r10,%2\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "cas %0,r2,r0\n" \
+ "mfs r10,%1" \
: "=r" ((USItype)(ph)), \
"=r" ((USItype)(pl)) \
: "%r" (__m0), \
@@ -959,9 +959,9 @@
#if defined (__sh2__) && W_TYPE_SIZE == 32
#define umul_ppmm(w1, w0, u, v) \
__asm__ ( \
- "dmulu.l %2,%3
- sts macl,%1
- sts mach,%0" \
+ "dmulu.l %2,%3\n" \
+ "sts macl,%1\n" \
+ "sts mach,%0" \
: "=r" ((USItype)(w1)), \
"=r" ((USItype)(w0)) \
: "r" ((USItype)(u)), \
@@ -972,8 +972,8 @@

#if defined (__sparc__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- __asm__ ("addcc %r4,%5,%1
- addx %r2,%3,%0" \
+ __asm__ ("addcc %r4,%5,%1\n" \
+ "addx %r2,%3,%0" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "%rJ" ((USItype)(ah)), \
@@ -982,8 +982,8 @@
"rI" ((USItype)(bl)) \
__CLOBBER_CC)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- __asm__ ("subcc %r4,%5,%1
- subx %r2,%3,%0" \
+ __asm__ ("subcc %r4,%5,%1\n" \
+ "subx %r2,%3,%0" \
: "=r" ((USItype)(sh)), \
"=&r" ((USItype)(sl)) \
: "rJ" ((USItype)(ah)), \
@@ -1029,45 +1029,45 @@
"r" ((USItype)(v)))
#define UMUL_TIME 5
#define udiv_qrnnd(q, r, n1, n0, d) \
- __asm__ ("! Inlined udiv_qrnnd
- wr %%g0,%2,%%y ! Not a delayed write for sparclite
- tst %%g0
- divscc %3,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%%g1
- divscc %%g1,%4,%0
- rd %%y,%1
- bl,a 1f
- add %1,%4,%1
-1: ! End of inline udiv_qrnnd" \
+ __asm__ ("! Inlined udiv_qrnnd\n" \
+ "wr %%g0,%2,%%y ! Not a delayed write for sparclite\n" \
+ "tst %%g0\n" \
+ "divscc %3,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%0\n" \
+ "rd %%y,%1\n" \
+ "bl,a 1f\n" \
+ "add %1,%4,%1\n" \
+"1: ! End of inline udiv_qrnnd" \
: "=r" ((USItype)(q)), \
"=r" ((USItype)(r)) \
: "r" ((USItype)(n1)), \
@@ -1087,46 +1087,46 @@
/* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */
#ifndef umul_ppmm
#define umul_ppmm(w1, w0, u, v) \
- __asm__ ("! Inlined umul_ppmm
- wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
- sra %3,31,%%g2 ! Don't move this insn
- and %2,%%g2,%%g2 ! Don't move this insn
- andcc %%g0,0,%%g1 ! Don't move this insn
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,%3,%%g1
- mulscc %%g1,0,%%g1
- add %%g1,%%g2,%0
- rd %%y,%1" \
+ __asm__ ("! Inlined umul_ppmm\n" \
+ "wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n" \
+ "sra %3,31,%%g2 ! Don't move this insn\n" \
+ "and %2,%%g2,%%g2 ! Don't move this insn\n" \
+ "andcc %%g0,0,%%g1 ! Don't move this insn\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,0,%%g1\n" \
+ "add %%g1,%%g2,%0\n" \
+ "rd %%y,%1" \
: "=r" ((USItype)(w1)), \
"=r" ((USItype)(w0)) \
: "%rI" ((USItype)(u)), \
@@ -1138,30 +1138,30 @@
/* It's quite necessary to add this much assembler for the sparc.
The default udiv_qrnnd (in C) is more than 10 times slower! */
#define udiv_qrnnd(q, r, n1, n0, d) \
- __asm__ ("! Inlined udiv_qrnnd
- mov 32,%%g1
- subcc %1,%2,%%g0
-1: bcs 5f
- addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
- sub %1,%2,%1 ! this kills msb of n
- addx %1,%1,%1 ! so this can't give carry
- subcc %%g1,1,%%g1
-2: bne 1b
- subcc %1,%2,%%g0
- bcs 3f
- addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
- b 3f
- sub %1,%2,%1 ! this kills msb of n
-4: sub %1,%2,%1
-5: addxcc %1,%1,%1
- bcc 2b
- subcc %%g1,1,%%g1
-! Got carry from n. Subtract next step to cancel this carry.
- bne 4b
- addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
- sub %1,%2,%1
-3: xnor %0,0,%0
- ! End of inline udiv_qrnnd" \
+ __asm__ ("! Inlined udiv_qrnnd\n" \
+ "mov 32,%%g1\n" \
+ "subcc %1,%2,%%g0\n" \
+"1: bcs 5f\n" \
+ "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n" \
+ "sub %1,%2,%1 ! this kills msb of n\n" \
+ "addx %1,%1,%1 ! so this can't give carry\n" \
+ "subcc %%g1,1,%%g1\n" \
+"2: bne 1b\n" \
+ "subcc %1,%2,%%g0\n" \
+ "bcs 3f\n" \
+ "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n" \
+ "b 3f\n" \
+ "sub %1,%2,%1 ! this kills msb of n\n" \
+"4: sub %1,%2,%1\n" \
+"5: addxcc %1,%1,%1\n" \
+ "bcc 2b\n" \
+ "subcc %%g1,1,%%g1\n" \
+"! Got carry from n. Subtract next step to cancel this carry.\n" \
+ "bne 4b\n" \
+ "addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb\n" \
+ "sub %1,%2,%1\n" \
+"3: xnor %0,0,%0\n" \
+ "! End of inline udiv_qrnnd" \
: "=&r" ((USItype)(q)), \
"=&r" ((USItype)(r)) \
: "r" ((USItype)(d)), \
@@ -1179,11 +1179,11 @@
#if (defined (__sparc_v9__) || (defined (__sparc__) && defined (__arch64__)) \
|| defined (__sparcv9)) && W_TYPE_SIZE == 64
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- __asm__ ("addcc %r4,%5,%1
- add %r2,%3,%0
- bcs,a,pn %%xcc, 1f
- add %0, 1, %0
- 1:" \
+ __asm__ ("addcc %r4,%5,%1\n" \
+ "add %r2,%3,%0\n" \
+ "bcs,a,pn %%xcc, 1f\n" \
+ "add %0, 1, %0\n" \
+ "1:" \
: "=r" ((UDItype)(sh)), \
"=&r" ((UDItype)(sl)) \
: "r" ((UDItype)(ah)), \
@@ -1193,11 +1193,11 @@
: "cc")

#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- __asm__ ("subcc %r4,%5,%1
- sub %r2,%3,%0
- bcs,a,pn %%xcc, 1f
- sub %0, 1, %0
- 1:" \
+ __asm__ ("subcc %r4,%5,%1\n" \
+ "sub %r2,%3,%0\n" \
+ "bcs,a,pn %%xcc, 1f\n" \
+ "sub %0, 1, %0\n" \
+ "1:" \
: "=r" ((UDItype)(sh)), \
"=&r" ((UDItype)(sl)) \
: "r" ((UDItype)(ah)), \
@@ -1210,27 +1210,27 @@
do { \
UDItype tmp1, tmp2, tmp3, tmp4; \
__asm__ __volatile__ ( \
- "srl %7,0,%3
- mulx %3,%6,%1
- srlx %6,32,%2
- mulx %2,%3,%4
- sllx %4,32,%5
- srl %6,0,%3
- sub %1,%5,%5
- srlx %5,32,%5
- addcc %4,%5,%4
- srlx %7,32,%5
- mulx %3,%5,%3
- mulx %2,%5,%5
- sethi %%hi(0x80000000),%2
- addcc %4,%3,%4
- srlx %4,32,%4
- add %2,%2,%2
- movcc %%xcc,%%g0,%2
- addcc %5,%4,%5
- sllx %3,32,%3
- add %1,%3,%1
- add %5,%2,%0" \
+ "srl %7,0,%3\n" \
+ "mulx %3,%6,%1\n" \
+ "srlx %6,32,%2\n" \
+ "mulx %2,%3,%4\n" \
+ "sllx %4,32,%5\n" \
+ "srl %6,0,%3\n" \
+ "sub %1,%5,%5\n" \
+ "srlx %5,32,%5\n" \
+ "addcc %4,%5,%4\n" \
+ "srlx %7,32,%5\n" \
+ "mulx %3,%5,%3\n" \
+ "mulx %2,%5,%5\n" \
+ "sethi %%hi(0x80000000),%2\n" \
+ "addcc %4,%3,%4\n" \
+ "srlx %4,32,%4\n" \
+ "add %2,%2,%2\n" \
+ "movcc %%xcc,%%g0,%2\n" \
+ "addcc %5,%4,%5\n" \
+ "sllx %3,32,%3\n" \
+ "add %1,%3,%1\n" \
+ "add %5,%2,%0" \
: "=r" ((UDItype)(wh)), \
"=&r" ((UDItype)(wl)), \
"=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
@@ -1244,8 +1244,8 @@

#if defined (__vax__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- __asm__ ("addl2 %5,%1
- adwc %3,%0" \
+ __asm__ ("addl2 %5,%1\n" \
+ "adwc %3,%0" \
: "=g" ((USItype)(sh)), \
"=&g" ((USItype)(sl)) \
: "%0" ((USItype)(ah)), \
@@ -1253,8 +1253,8 @@
"%1" ((USItype)(al)), \
"g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- __asm__ ("subl2 %5,%1
- sbwc %3,%0" \
+ __asm__ ("subl2 %5,%1\n" \
+ "sbwc %3,%0" \
: "=g" ((USItype)(sh)), \
"=&g" ((USItype)(sl)) \
: "0" ((USItype)(ah)), \