
made to work, and added the b64 shift routines to the interface

ceriel, 35 years ago
commit 1e9c82d6e5

+ 2 - 2
modules/src/flt_arith/b64_add.c

@@ -8,8 +8,8 @@
 # include "misc.h"
 
 int
-b64_add(e1,e2)
-	register struct _mantissa *e1,*e2;
+flt_b64_add(e1,e2)
+	register struct flt_mantissa *e1,*e2;
 {
 	int	overflow;
 	int	carry;
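
The hunk above only shows the renamed entry point; the routine's body is not part of the diff. For orientation, its job (its return value is used as an "addition carry" in flt_add.c further down) is to add one 64-bit mantissa into another and report overflow out of bit 63. A minimal sketch under that reading, with a hypothetical name so it is not mistaken for the file's actual body:

    #include <flt_arith.h>	/* struct flt_mantissa: two 32-bit halves kept in longs */

    /* Sketch only: add *e2 into *e1, return 1 on carry out of bit 63. */
    static int sketch_b64_add(struct flt_mantissa *e1, struct flt_mantissa *e2)
    {
        unsigned long al = e1->flt_l_32 & 0xFFFFFFFFUL;
        unsigned long bl = e2->flt_l_32 & 0xFFFFFFFFUL;
        unsigned long lo = (al + bl) & 0xFFFFFFFFUL;
        int carry = lo < al;			/* low half wrapped */
        unsigned long ah = e1->flt_h_32 & 0xFFFFFFFFUL;
        unsigned long bh = e2->flt_h_32 & 0xFFFFFFFFUL;
        unsigned long hi = (ah + bh + carry) & 0xFFFFFFFFUL;

        e1->flt_l_32 = lo;
        e1->flt_h_32 = hi;
        return hi < ah || (carry && bh == 0xFFFFFFFFUL);	/* carry out of bit 63 */
    }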

+ 6 - 6
modules/src/flt_arith/b64_sft.c

@@ -7,8 +7,8 @@
 
 #include "misc.h"
 
-b64_sft(e,n)
-	register struct _mantissa *e;
+flt_b64_sft(e,n)
+	register struct flt_mantissa *e;
 	register int n;
 {
 	if (n > 0) {
@@ -56,8 +56,8 @@ b64_sft(e,n)
 	}
 }
 
-b64_lsft(e)
-	register struct _mantissa *e;
+flt_b64_lsft(e)
+	register struct flt_mantissa *e;
 {
 	/*	shift left 1 bit */
 	e->flt_h_32 = (e->flt_h_32 << 1) & 0xFFFFFFFF;
@@ -65,8 +65,8 @@ b64_lsft(e)
 	e->flt_l_32 = (e->flt_l_32 << 1) & 0xFFFFFFFF;
 }
 
-b64_rsft(e)
-	register struct _mantissa *e;
+flt_b64_rsft(e)
+	register struct flt_mantissa *e;
 {
 	/*	shift right 1 bit */
 	e->flt_l_32 = (e->flt_l_32 >> 1) & 0x7FFFFFFF;
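
Only the first statement of each one-bit shift is visible in the hunks above; the line that carries a bit between the two 32-bit halves falls outside the context shown. A sketch of both one-bit shifts under that layout (hypothetical names, mirroring the visible lines):

    #include <flt_arith.h>

    /* Sketch: shift a 64-bit mantissa (two 32-bit halves in longs) one bit left. */
    static void sketch_lsft(struct flt_mantissa *e)
    {
        e->flt_h_32 = (e->flt_h_32 << 1) & 0xFFFFFFFF;
        if (e->flt_l_32 & 0x80000000L) e->flt_h_32 |= 1;	/* bit 31 of low -> bit 0 of high */
        e->flt_l_32 = (e->flt_l_32 << 1) & 0xFFFFFFFF;
    }

    /* Sketch: one bit right. */
    static void sketch_rsft(struct flt_mantissa *e)
    {
        e->flt_l_32 = (e->flt_l_32 >> 1) & 0x7FFFFFFF;
        if (e->flt_h_32 & 1) e->flt_l_32 |= 0x80000000L;	/* bit 0 of high -> bit 31 of low */
        e->flt_h_32 = (e->flt_h_32 >> 1) & 0x7FFFFFFF;
    }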

+ 25 - 24
modules/src/flt_arith/flt_add.c

@@ -13,7 +13,7 @@ flt_add(e1,e2,e3)
 	/*	Add two extended numbers e1 and e2, and put the result
 		in e3
 	*/
-	flt_arith ce2;
+	flt_arith ce1, ce2;
 	int diff;
 
 	flt_status = 0;
@@ -26,43 +26,46 @@ flt_add(e1,e2,e3)
 		return;
 	}
 	ce2 = *e2;
-	*e3 = *e1;
-	e1 = &ce2;
+	ce1 = *e1;
+	e1 = &ce1;
+	e2 = &ce2;
 
 	/* adjust mantissas to equal power */
-	diff = e3->flt_exp - e1->flt_exp;
+	diff = e2->flt_exp - e1->flt_exp;
 	if (diff < 0) {
 		diff = -diff;
-		e3->flt_exp += diff;
-		b64_sft(&(e3->flt_mantissa), diff);
+		e2->flt_exp += diff;
+		flt_b64_sft(&(e2->flt_mantissa), diff);
 	}
 	else if (diff > 0) {
 		e1->flt_exp += diff;
-		b64_sft(&(e1->flt_mantissa), diff);
+		flt_b64_sft(&(e1->flt_mantissa), diff);
 	}
-	if (e1->flt_sign != e3->flt_sign) {
-		/* e3 + e1 = e3 - (-e1) */
-		int tmp = ucmp(e1->m1, e3->m1);
-		int tmp2 = ucmp(e1->m2, e3->m2);
+	if (e1->flt_sign != e2->flt_sign) {
+		/* e2 + e1 = e2 - (-e1) */
+		int tmp = ucmp(e1->m1, e2->m1);
+		int tmp2 = ucmp(e1->m2, e2->m2);
 		if (tmp > 0 || (tmp == 0 && tmp2 > 0)) {
-                	/*      abs(e1) > abs(e3) */
+                	/*      abs(e1) > abs(e2) */
                 	if (tmp2 < 0) {
                         	e1->m1 -= 1;    /* carry in */
                 	}
-                	e1->m1 -= e3->m1;
-                	e1->m2 -= e3->m2;
+                	e1->m1 -= e2->m1;
+                	e1->m2 -= e2->m2;
                 	*e3 = *e1;
         	}
         	else {
                 	if (tmp2 > 0)
-                        	e3->m1 -= 1;    /* carry in */
-                	e3->m1 -= e1->m1;
-                	e3->m2 -= e1->m2;
+                        	e2->m1 -= 1;    /* carry in */
+                	e2->m1 -= e1->m1;
+                	e2->m2 -= e1->m2;
+			*e3 = *e2;
         	}
 	}
 	else {
-		if (b64_add(&e3->flt_mantissa,&e1->flt_mantissa)) {/* addition carry */
-			b64_sft(&e3->flt_mantissa,1);/* shift mantissa one bit RIGHT */
+		*e3 = *e2;
+		if (flt_b64_add(&e3->flt_mantissa,&e1->flt_mantissa)) {/* addition carry */
+			flt_b64_rsft(&e3->flt_mantissa);
 			e3->m1 |= 0x80000000L;	/* set max bit	*/
 			e3->flt_exp++;		/* increase the exponent */
 		}
@@ -74,9 +77,7 @@ flt_add(e1,e2,e3)
 flt_sub(e1,e2,e3)
 	flt_arith *e1,*e2,*e3;
 {
-	flt_arith ce2;
-
-	ce2 = *e2;
-	ce2.flt_sign = ! ce2.flt_sign;
-	flt_add(e1,&ce2,e3);
+	e2->flt_sign = ! e2->flt_sign;
+	flt_add(e1,e2,e3);
+	e2->flt_sign = ! e2->flt_sign;
 }
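
The reworked flt_add copies both operands into locals and only stores into e3 at the end, so a call whose result aliases an input (flt_ecvt further down now calls flt_add(e, &x, e)) stays well defined. flt_sub no longer works on a private copy of *e2: it flips e2's sign in place, adds, and flips it back, which suggests the result argument should be a different object from e2. A small usage sketch under those rules (sketch_accumulate is a hypothetical caller, not part of the module):

    #include <flt_arith.h>

    /* Sketch: aliasing the result with the first operand is fine. */
    static void sketch_accumulate(flt_arith *sum, flt_arith *term)
    {
        flt_add(sum, term, sum);	/* e3 aliases e1 */
        flt_sub(sum, term, sum);	/* term's sign is flipped and restored around the call */
    }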

+ 2 - 2
modules/src/flt_arith/flt_ar2flt.c

@@ -35,13 +35,13 @@ flt_arith2flt(n, e)
 		e->flt_exp++;
 	}
 	for (i = 64; i > 0 && n != 0; i--) {
-		b64_rsft(&(e->flt_mantissa));
+		flt_b64_rsft(&(e->flt_mantissa));
 		e->m1 |= (n & 1) << 31;
 		n >>= 1;
 	}
 
 	if (i > 0) {
-		b64_sft(&(e->flt_mantissa), i);
+		flt_b64_sft(&(e->flt_mantissa), i);
 	}
 	flt_status = 0;
 	flt_nrm(e);

+ 54 - 0
modules/src/flt_arith/flt_arith.3

@@ -6,6 +6,25 @@ flt_arith \- high precision floating point arithmetic
 .nf
 .B #include <flt_arith.h>
 .PP
+.ta 5m 30m
+struct flt_mantissa {
+	long	flt_h_32;	/* high order 32 bits of mantissa */
+	long	flt_l_32;	/* low order 32 bits of mantissa */
+};
+
+typedef struct {
+	short	flt_sign;	/* 0 for positive, 1 for negative */
+	short	flt_exp;	/* between -16384 and 16384 */
+	struct flt_mantissa flt_mantissa;	/* normalized, in [1,2). */
+} flt_arith;
+
+extern int	flt_status;
+#define FLT_OVFL	001
+#define FLT_UNFL	002
+#define FLT_DIV0	004
+#define	FLT_NOFLT	010
+#define FLT_BTSM	020
+.PP
 .B flt_add(e1, e2, e3)
 .B flt_arith *e1, *e2, *e3;
 .PP
@@ -42,6 +61,16 @@ flt_arith \- high precision floating point arithmetic
 .PP
 .B arith flt_flt2arith(e)
 .B flt_arith *e;
+.PP
+.B flt_b64_sft(m, n)
+.B struct flt_mantissa *m;
+.B int n;
+.PP
+.B flt_b64_rsft(m)
+.B struct flt_mantissa *m;
+.PP
+.B flt_b64_lsft(m)
+.B struct flt_mantissa *m;
 .SH DESCRIPTION
 This set of routines emulates floating point arithmetic, in a high
 precision. It is intended primarily for compilers that need to evaluate
@@ -198,6 +227,31 @@ This can only occur with the routine
 indicates that the buffer is too small. The contents of the buffer is
 undefined. This can only occur with the routine
 .IR flt_flt2str .
+.PP
+The routine
+.I flt_b64_sft
+shifts the mantissa
+.I m
+.I |n|
+bits left or right, depending on the sign of
+.IR n .
+If
+.I n
+is negative, it is a left-shift; If
+.I n
+is positive, it is a right shift.
+.PP
+The routine
+.I flt_b64_rsft
+shifts the mantissa
+.I m
+1 bit right.
+.PP
+The routine
+.I flt_b64_lsft
+shifts the mantissa
+.I m
+1 bit left.
 .SH FILES
 ~em/modules/h/flt_arith.h
 .br
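
Since the shift routines are now part of the documented interface, a short usage sketch following the conventions described above (a negative count shifts left, a positive count shifts right); the program is illustrative only:

    #include <flt_arith.h>

    int main(void)
    {
        struct flt_mantissa m;

        m.flt_h_32 = 0x1;
        m.flt_l_32 = 0x0;
        flt_b64_sft(&m, -4);	/* left 4 bits:  flt_h_32 is now 0x10 */
        flt_b64_rsft(&m);	/* right 1 bit:  flt_h_32 is now 0x08 */
        flt_b64_lsft(&m);	/* left 1 bit:   flt_h_32 is back to 0x10 */
        flt_b64_sft(&m, 4);	/* right 4 bits: back to the original value */
        return 0;
    }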

+ 8 - 10
modules/src/flt_arith/flt_arith.h

@@ -7,18 +7,16 @@
 #ifndef __FLT_INCLUDED__
 #define __FLT_INCLUDED__
 
-struct _mantissa {
-	long	flt_h_32;
-	long	flt_l_32;
+struct flt_mantissa {
+	long	flt_h_32;	/* high order 32 bits of mantissa */
+	long	flt_l_32;	/* low order 32 bits of mantissa */
 };
 
-struct _EXTEND {
-	short	flt_sign;
-	short	flt_exp;
-	struct _mantissa flt_mantissa;
-};
-
-typedef struct _EXTEND flt_arith;
+typedef struct {
+	short	flt_sign;	/* 0 for positive, 1 for negative */
+	short	flt_exp;	/* between -16384 and 16384 */
+	struct flt_mantissa flt_mantissa;	/* normalized, in [1,2). */
+} flt_arith;
 
 extern int	flt_status;
 #define FLT_OVFL	001

+ 3 - 3
modules/src/flt_arith/flt_div.c

@@ -10,7 +10,6 @@
 flt_div(e1,e2,e3)
 	register flt_arith *e1,*e2,*e3;
 {
-	short	error = 0;
 	long	result[2];
 	register long	*lp;
 	unsigned short u[9], v[5];
@@ -35,7 +34,7 @@ flt_div(e1,e2,e3)
 	e3->flt_exp = e1->flt_exp - e2->flt_exp + 2;
 
 	u[4] = (e1->m2 & 1) << 15;
-	b64_rsft(&(e1->flt_mantissa));
+	flt_b64_rsft(&(e1->flt_mantissa));
 	u[0] = (e1->m1 >> 16) & 0xFFFF;
 	u[1] = e1->m1 & 0xFFFF;
 	u[2] = (e1->m2 >> 16) & 0xFFFF;
@@ -80,7 +79,8 @@ flt_div(e1,e2,e3)
 			}
 		}
 		temp -= q_est * v[1];
-		while (!(temp&0xFFFF0000) && ucmp(v[2]*q_est,((temp<<16)+u_p[2])) > 0) {
+		while (!(temp&0xFFFF0000) &&
+		       ucmp((long)(v[2]*q_est),(long)((temp<<16)+u_p[2])) > 0) {
 			q_est--;
 			temp += v[1];
 		}

+ 2 - 2
modules/src/flt_arith/flt_flt2ar.c

@@ -15,7 +15,7 @@ flt_flt2arith(e)
 	/*	Convert the flt_arith "n" to an arith.
 	*/
 	arith	n;
-	struct _mantissa a;
+	struct flt_mantissa a;
 
 	flt_status = 0;
 	if (e->flt_exp < 0) {
@@ -48,7 +48,7 @@ flt_flt2arith(e)
 	}
 	
 	a = e->flt_mantissa;
-	b64_sft(&a, 63-e->flt_exp);
+	flt_b64_sft(&a, 63-e->flt_exp);
 	n = a.flt_l_32 | ((a.flt_h_32 << 16) << 16);
 	/* not << 32; this could be an undefined operation */
 	return n;

+ 2 - 2
modules/src/flt_arith/flt_modf.c

@@ -26,7 +26,7 @@ flt_modf(e, ipart, fpart)
 	}
 	*ipart = *e;
 	/* "loose" low order bits */
-	b64_sft(&(ipart->flt_mantissa), 63 - e->flt_exp);
-	b64_sft(&(ipart->flt_mantissa), e->flt_exp - 63);
+	flt_b64_sft(&(ipart->flt_mantissa), 63 - e->flt_exp);
+	flt_b64_sft(&(ipart->flt_mantissa), e->flt_exp - 63);
 	flt_sub(e, ipart, fpart);
 }
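
The two back-to-back shifts implement truncation: shifting the mantissa right by 63 - flt_exp discards the fraction bits, and shifting it back left by the same amount leaves only the integer part, after which fpart is obtained by subtraction. The same trick on an ordinary integer, for illustration:

    /* Drop the low k bits: right shift then left shift by k. */
    unsigned long x = 0x12345678UL;
    int k = 8;

    x = (x >> k) << k;	/* x == 0x12345600 */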

+ 3 - 2
modules/src/flt_arith/flt_mul.c

@@ -72,8 +72,9 @@ flt_mul(e1,e2,e3)
 	e3->m1 = ((long)result[0] << 16) + result[1];
 	e3->m2 = ((long)result[2] << 16) + result[3];
 	if (result[4] & 0x8000) {
-		if (++e3->m2 == 0) {
-			if (++e3->m1 == 0) {
+		if (++e3->m2 == 0 || (e3->m2 & ~ 0xFFFFFFFF)) {
+			e3->m2 = 0;
+			if (++e3->m1 == 0 || (e3->m1 & ~ 0xFFFFFFFF)) {
 				e3->m1 = 0x80000000;
 				e3->flt_exp++;
 			}
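
The added masks make the rounding carry also work on machines where long is wider than 32 bits: there, incrementing a half that holds 0xFFFFFFFF yields 0x100000000 instead of wrapping to zero, so the overflow has to be caught by testing the bits above bit 31 and the half cleared explicitly. A sketch of the same increment-with-carry on one 32-bit half kept in a long (hypothetical helper, not part of the module):

    /* Sketch: increment a 32-bit half stored in a long; return 1 on carry out. */
    static int sketch_inc32(long *half)
    {
        ++*half;
        if (*half == 0 || (*half & ~0xFFFFFFFFUL)) {
            *half = 0;
            return 1;
        }
        return 0;
    }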

+ 1 - 1
modules/src/flt_arith/flt_nrm.c

@@ -29,6 +29,6 @@ flt_nrm(e)
 			cnt--;
 		}
 		e->flt_exp += cnt;
-		b64_sft(&(e->flt_mantissa), cnt);
+		flt_b64_sft(&(e->flt_mantissa), cnt);
 	}
 }

+ 39 - 38
modules/src/flt_arith/flt_str2fl.c

@@ -5,6 +5,7 @@
 
 /* $Header$ */
 
+#include <ctype.h>
 #include "misc.h"
 
 /* The following tables can be computed with the following bc(1)
@@ -189,6 +190,9 @@ static flt_arith r_big_10pow[] = { /* representation of 10 ** -(28*i) */
 	{ 0,	-1768,	0xD4EEC394,	0xD6258BF8 }
 };
 
+#define BIGSZ	(sizeof(big_10pow)/sizeof(big_10pow[0]))
+#define SMALLSZ	(sizeof(s10pow)/sizeof(s10pow[0]))
+
 static
 add_exponent(e, exp)
 	register flt_arith *e;
@@ -198,13 +202,13 @@ add_exponent(e, exp)
 	flt_arith x;
 
 	if (neg) exp = -exp;
-	divsz = exp / (sizeof(s10pow)/sizeof(s10pow[0]));
-	modsz = exp % (sizeof(s10pow)/sizeof(s10pow[0]));
+	divsz = exp / SMALLSZ;
+	modsz = exp % SMALLSZ;
 	if (neg) {
 		flt_mul(e, &r_10pow[modsz], &x);
-		while (divsz >= sizeof(r_big_10pow)/sizeof(r_big_10pow[0])) {
-			flt_mul(&x, &r_big_10pow[sizeof(r_big_10pow)/sizeof(r_big_10pow[0])-1],&x);
-			divsz -= sizeof(r_big_10pow)/sizeof(r_big_10pow[0])-1;
+		while (divsz >= BIGSZ) {
+			flt_mul(&x, &r_big_10pow[BIGSZ-1],&x);
+			divsz -= BIGSZ-1;
 			flt_chk(e);
 			if (flt_status != 0) return;
 		}
@@ -212,9 +216,9 @@ add_exponent(e, exp)
 	}
 	else {
 		flt_mul(e, &s10pow[modsz], &x);
-		while (divsz >= sizeof(big_10pow)/sizeof(big_10pow[0])) {
-			flt_mul(&x, &big_10pow[sizeof(big_10pow)/sizeof(big_10pow[0])-1],&x);
-			divsz -= sizeof(big_10pow)/sizeof(big_10pow[0])-1;
+		while (divsz >= BIGSZ) {
+			flt_mul(&x, &big_10pow[BIGSZ-1],&x);
+			divsz -= BIGSZ-1;
 			flt_chk(e);
 			if (flt_status != 0) return;
 		}
@@ -250,15 +254,15 @@ flt_str2flt(s, e)
 		if (c == '.') continue;
 		digitseen = 1;
 		if (e->m1 <= 0x7FFFFFFF/5) {
-			struct _mantissa	a1;
+			struct flt_mantissa	a1;
 
 			a1 = e->flt_mantissa;
-			b64_sft(&(e->flt_mantissa), -3);
-			b64_sft(&a1, -1);
-			b64_add(&(e->flt_mantissa), &a1);
+			flt_b64_sft(&(e->flt_mantissa), -3);
+			flt_b64_lsft(&a1);
+			flt_b64_add(&(e->flt_mantissa), &a1);
 			a1.flt_h_32 = 0;
 			a1.flt_l_32 = c - '0';
-			b64_add(&(e->flt_mantissa), &a1);
+			flt_b64_add(&(e->flt_mantissa), &a1);
 		}
 		else exp++;
 		if (dotseen) exp--;
@@ -318,8 +322,6 @@ flt_ecvt(e, ndigit, decpt, sign)
 
 	*decpt = 0;
 	if (e->m1 != 0) {
-#define BIGSZ	(sizeof(big_10pow)/sizeof(big_10pow[0]))
-#define SMALLSZ	(sizeof(s10pow)/sizeof(s10pow[0]))
 		register flt_arith *pp = &big_10pow[1];
 
 		while (flt_cmp(e, &big_10pow[BIGSZ-1]) >= 0) {
@@ -333,18 +335,18 @@ flt_ecvt(e, ndigit, decpt, sign)
 		flt_mul(e,&r_big_10pow[pp-big_10pow],e);
 		*decpt += (pp - big_10pow)*SMALLSZ;
 		pp = &s10pow[1];
-		while (pp < &s10pow[SMALLSZ] && cmp_ext(e, pp) >= 0) pp++;
+		while (pp < &s10pow[SMALLSZ] && flt_cmp(e, pp) >= 0) pp++;
 		pp--;
 		flt_mul(e, &r_10pow[pp-s10pow], e);
 		*decpt += pp - s10pow;
 
-		if (cmp_ext(e, &s10pow[0]) < 0) {
+		if (flt_cmp(e, &s10pow[0]) < 0) {
                 	while (flt_cmp(e, &r_big_10pow[BIGSZ-1]) < 0) { 
                         	flt_mul(e,&big_10pow[BIGSZ-1],e);
                         	*decpt -= (BIGSZ-1)*SMALLSZ;
                 	}
 			pp = &r_big_10pow[1];
-			while(cmp_ext(e,pp) < 0) pp++;
+			while(flt_cmp(e,pp) < 0) pp++;
 			pp--;
 			flt_mul(e,&big_10pow[pp-r_big_10pow],e);
 			*decpt -= (pp - r_big_10pow)*SMALLSZ;
@@ -352,7 +354,7 @@ flt_ecvt(e, ndigit, decpt, sign)
 			flt_mul(e, &s10pow[1], e);
 			(*decpt)--;
 			pp = &r_10pow[0];
-			while(cmp_ext(e, pp) < 0) pp++;
+			while(flt_cmp(e, pp) < 0) pp++;
 			flt_mul(e, &s10pow[pp-r_10pow], e);
 			*decpt -= pp - r_10pow;
 		}
@@ -364,10 +366,11 @@ flt_ecvt(e, ndigit, decpt, sign)
 
 			x.m2 = 0; x.flt_exp = e->flt_exp;
 			x.flt_sign = 1;
-			x.m1 = e->m1>>(31-e->flt_exp);
+			x.m1 = (e->m1 >> 1) & 0x7FFFFFFF;
+			x.m1 = x.m1>>(30-e->flt_exp);
 			*p++ = (x.m1) + '0';
 			x.m1 = x.m1 << (31-e->flt_exp);
-			add_ext(e, &x, e);
+			flt_add(e, &x, e);
 		}
 		else *p++ = '0';
 		flt_mul(e, &s10pow[1], e);
@@ -393,7 +396,7 @@ flt_flt2str(e, buf, bufsize)
 	char *buf;
 {
 
-#define NDIG	25
+#define NDIG	20
 	int sign, dp;
 	register int i;
 	register char *s1;
@@ -413,23 +416,21 @@ flt_flt2str(e, buf, bufsize)
         if ((e->m1 | e->m2) || e->flt_exp == EXT_MIN || e->flt_exp == EXT_MAX) {
 		--dp ;
 	}
-	if (dp == 0) {
-		*s = 0;
-		return;
+	if (dp != 0) {
+        	*s++ = 'e';
+        	if ( dp<0 ) {
+                	*s++ = '-' ; dp= -dp ;
+        	} else {
+                	*s++ = '+' ;
+        	}
+		s1 = &Xbuf[NDIG+12];
+		*--s1 = '\0';
+		do {
+			*--s1 = dp % 10 + '0';
+			dp /= 10;
+		} while (dp != 0);
+		while (*s1) *s++ = *s1++;
 	}
-        *s++ = 'e';
-        if ( dp<0 ) {
-                *s++ = '-' ; dp= -dp ;
-        } else {
-                *s++ = '+' ;
-        }
-	s1 = &Xbuf[NDIG+12];
-	*--s1 = '\0';
-	do {
-		*--s1 = dp % 10 + '0';
-		dp /= 10;
-	} while (dp != 0);
-	while (*s1) *s++ = *s1++;
 	*s++ = '\0';
 	if (s - Xbuf > bufsize) {
 		flt_status = FLT_BTSM;
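
In the digit-accumulation loop, the mantissa is multiplied by ten without a multiply instruction: m * 10 is assembled as (m << 3) + (m << 1) via flt_b64_sft(.., -3), flt_b64_lsft and flt_b64_add, and the new digit is then added in. The same decomposition on an ordinary integer, for illustration:

    /* value = value * 10 + digit, using shifts and adds only. */
    unsigned long value = 123;
    int digit = 4;

    value = (value << 3) + (value << 1) + digit;	/* value == 1234 */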

+ 1 - 4
modules/src/flt_arith/misc.h

@@ -18,7 +18,4 @@
 #define ucmp		flt__ucmp
 #define flt_nrm		flt__nrm
 #define flt_chk		flt__chk
-#define b64_add		flt__64add
-#define b64_sft		flt__64sft
-#define b64_rsft	flt__64rsft
-#define b64_lsft	flt__64lsft
+#define flt_b64_add	flt__64add

+ 1 - 1
modules/src/flt_arith/ucmp.c

@@ -13,7 +13,7 @@ ucmp(l1,l2)
 {
 	if (l1 == l2) return 0;
 	if (l2 >= 0) {
-		if (l1 > l2) return 1;
+		if (l1 > l2 || l1 < 0) return 1;
 		return -1;
 	}
 	if (l1 > 0 || l1 < l2) return -1;
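
ucmp orders two longs by their 32-bit unsigned interpretation, so a value with the sign bit set counts as larger than any nonnegative value; the added "l1 < 0" test covers exactly that case when l2 is nonnegative. A standalone way to see the intended ordering (sketch only, using a hypothetical helper):

    #include <stdio.h>

    /* Compare as unsigned 32-bit values: 0, 1 or -1, like ucmp(). */
    static int sketch_ucmp(long l1, long l2)
    {
        unsigned long u1 = (unsigned long)l1 & 0xFFFFFFFFUL;
        unsigned long u2 = (unsigned long)l2 & 0xFFFFFFFFUL;
        return u1 == u2 ? 0 : (u1 > u2 ? 1 : -1);
    }

    int main(void)
    {
        /* 0x80000000 is negative as a signed 32-bit long, but it is the
         * larger unsigned value, so it must compare greater than 1.
         */
        printf("%d\n", sketch_ucmp((long)0x80000000L, 1L));	/* prints 1 */
        return 0;
    }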