ScaLAPACK 2.1
ScaLAPACK: Scalable Linear Algebra PACKage
pdrscl.f
      SUBROUTINE PDRSCL( N, SA, SX, IX, JX, DESCX, INCX )
*
*  -- ScaLAPACK auxiliary routine (version 1.7) --
*     University of Tennessee, Knoxville, Oak Ridge National Laboratory,
*     and University of California, Berkeley.
*     May 1, 1997
*
*     .. Scalar Arguments ..
      INTEGER            IX, INCX, JX, N
      DOUBLE PRECISION   SA
*     ..
*     .. Array Arguments ..
      INTEGER            DESCX( * )
      DOUBLE PRECISION   SX( * )
*     ..
*
*  Purpose
*  =======
*
*  PDRSCL multiplies an N-element real distributed vector sub( X ) by
*  the real scalar 1/a.  This is done without overflow or underflow as
*  long as the final result sub( X )/a does not overflow or underflow,
*  where sub( X ) denotes X(IX:IX+N-1,JX:JX) if INCX = 1, and
*  X(IX:IX,JX:JX+N-1) if INCX = M_X.
*
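*  As a purely illustrative numerical example (an editorial addition,
*  not part of the original comments): with a = 1.0D-310, forming 1/a
*  directly would overflow in double precision.  PDRSCL instead scales
*  sub( X ) by BIGNUM = 1/SMLNUM on a first pass and by the then
*  representable remainder CNUM / CDEN on a second pass, so no
*  intermediate overflow occurs whenever the final result sub( X )/a is
*  itself representable.
*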
*  Notes
*  =====
*
*  Each global data object is described by an associated description
*  vector.  This vector stores the information required to establish
*  the mapping between an object element and its corresponding process
*  and memory location.
*
*  Let A be a generic term for any 2D block cyclically distributed
*  array.  Such a global array has an associated description vector
*  descA.  In the following comments, the character _ should be read
*  as "of the global array".
*
*  NOTATION        STORED IN      EXPLANATION
*  --------------- -------------- --------------------------------------
*  DT_A   (global) descA[ DT_ ]   The descriptor type.  In this case,
*                                 DT_A = 1.
*  CTXT_A (global) descA[ CTXT_ ] The BLACS context handle, indicating
*                                 the BLACS process grid A is
*                                 distributed over.  The context itself
*                                 is global, but the handle (the
*                                 integer value) may vary.
*  M_A    (global) descA[ M_ ]    The number of rows in the global
*                                 array A.
*  N_A    (global) descA[ N_ ]    The number of columns in the global
*                                 array A.
*  MB_A   (global) descA[ MB_ ]   The blocking factor used to
*                                 distribute the rows of the array.
*  NB_A   (global) descA[ NB_ ]   The blocking factor used to
*                                 distribute the columns of the array.
*  RSRC_A (global) descA[ RSRC_ ] The process row over which the first
*                                 row of the array A is distributed.
*  CSRC_A (global) descA[ CSRC_ ] The process column over which the
*                                 first column of the array A is
*                                 distributed.
*  LLD_A  (local)  descA[ LLD_ ]  The leading dimension of the local
*                                 array.  LLD_A >= MAX(1,LOCr(M_A)).
*
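*  As an editorial illustration (not part of the original comments),
*  a 10-element column vector distributed with row blocking factor 2
*  over a 2 x 2 process grid, starting on process (0,0) and stored
*  locally with leading dimension 6, would be described by
*
*     DESCA = ( 1, ICTXT, 10, 1, 2, 1, 0, 0, 6 )
*
*  where ICTXT denotes the BLACS context handle.  Such a descriptor
*  could be filled by the ScaLAPACK tool routine DESCINIT, with INFO
*  returning its error flag:
*
*     CALL DESCINIT( DESCA, 10, 1, 2, 1, 0, 0, ICTXT, 6, INFO )
*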
*  Let K be the number of rows or columns of a distributed matrix,
*  and assume that its process grid has dimension p x q.
*  LOCr( K ) denotes the number of elements of K that a process
*  would receive if K were distributed over the p processes of its
*  process column.
*  Similarly, LOCc( K ) denotes the number of elements of K that a
*  process would receive if K were distributed over the q processes of
*  its process row.
*  The values of LOCr() and LOCc() may be determined via a call to the
*  ScaLAPACK tool function, NUMROC:
*          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
*          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
*  An upper bound for these quantities may be computed by:
*          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
*          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
*
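*  For illustration (an editorial addition, not part of the original
*  comments), assuming MYROW, MYCOL, NPROW and NPCOL have been obtained
*  from BLACS_GRIDINFO, the local extents of the array described by
*  DESCA could be computed with
*
*     MP = NUMROC( DESCA( M_ ), DESCA( MB_ ), MYROW,
*                  DESCA( RSRC_ ), NPROW )
*     NQ = NUMROC( DESCA( N_ ), DESCA( NB_ ), MYCOL,
*                  DESCA( CSRC_ ), NPCOL )
*
*  where MP and NQ are hypothetical INTEGER locals holding LOCr( M_A )
*  and LOCc( N_A ), respectively.
*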
*  Because vectors may be seen as particular matrices, a distributed
*  vector is considered to be a distributed matrix.
*
*  Arguments
*  =========
*
*  N       (global input) INTEGER
*          The number of components of the distributed vector sub( X ).
*          N >= 0.
*
*  SA      (global input) DOUBLE PRECISION
*          The scalar a which is used to divide each component of
*          sub( X ).  SA must be nonzero; otherwise the subroutine
*          divides by zero.
*
*  SX      (local input/local output) DOUBLE PRECISION array
*          containing the local pieces of a distributed matrix of
*          dimension at least
*             ( (JX-1)*M_X + IX + ( N - 1 )*abs( INCX ) ).
*          This array contains the entries of the distributed vector
*          sub( X ).
*
*  IX      (global input) INTEGER
*          The global row index of the submatrix of the distributed
*          matrix X to operate on.
*
*  JX      (global input) INTEGER
*          The global column index of the submatrix of the distributed
*          matrix X to operate on.
*
*  DESCX   (global and local input) INTEGER array of dimension DLEN_
*          (= 9).  The array descriptor of the distributed matrix X.
*
*  INCX    (global input) INTEGER
*          The global increment for the elements of X.  Only two values
*          of INCX are supported in this version, namely 1 and M_X.
*
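*  As a hedged usage illustration (an editorial addition, not part of
*  the original comments): assuming X is the local array of a
*  distributed M_X-by-N_X matrix with descriptor DESCX, and ALPHA is a
*  nonzero DOUBLE PRECISION scalar, the N entries of the column
*  X(IX:IX+N-1,JX) could be divided by ALPHA with
*
*     CALL PDRSCL( N, ALPHA, X, IX, JX, DESCX, 1 )
*
*  and the N entries of the row X(IX,JX:JX+N-1) with
*
*     CALL PDRSCL( N, ALPHA, X, IX, JX, DESCX, DESCX( M_ ) )
*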
*  =====================================================================
*
*     .. Parameters ..
      INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
     $                   LLD_, MB_, M_, NB_, N_, RSRC_
      PARAMETER          ( BLOCK_CYCLIC_2D = 1, DLEN_ = 9, DTYPE_ = 1,
     $                     CTXT_ = 2, M_ = 3, N_ = 4, MB_ = 5, NB_ = 6,
     $                     RSRC_ = 7, CSRC_ = 8, LLD_ = 9 )
      DOUBLE PRECISION   ONE, ZERO
      PARAMETER          ( ONE = 1.0D+0, ZERO = 0.0D+0 )
*     ..
*     .. Local Scalars ..
      LOGICAL            DONE
      INTEGER            ICTXT, MYCOL, MYROW, NPCOL, NPROW
      DOUBLE PRECISION   BIGNUM, CDEN, CDEN1, CNUM, CNUM1, MUL, SMLNUM
*     ..
*     .. External Subroutines ..
      EXTERNAL           BLACS_GRIDINFO, PDLABAD, PDSCAL
*     ..
*     .. External Functions ..
      DOUBLE PRECISION   PDLAMCH
      EXTERNAL           PDLAMCH
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          ABS
*     ..
*     .. Executable Statements ..
*
*     Get grid parameters
*
      ICTXT = DESCX( CTXT_ )
      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*
*     Quick return if possible
*
      IF( N.LE.0 )
     $   RETURN
*
*     Get machine parameters
*
      SMLNUM = PDLAMCH( ICTXT, 'S' )
      BIGNUM = ONE / SMLNUM
      CALL PDLABAD( ICTXT, SMLNUM, BIGNUM )
*
*     Initialize the denominator to SA and the numerator to 1.
*
      CDEN = SA
      CNUM = ONE
*
   10 CONTINUE
      CDEN1 = CDEN*SMLNUM
      CNUM1 = CNUM / BIGNUM
      IF( ABS( CDEN1 ).GT.ABS( CNUM ) .AND. CNUM.NE.ZERO ) THEN
*
*        Pre-multiply sub( X ) by SMLNUM if CDEN is large compared to
*        CNUM.
*
         MUL = SMLNUM
         DONE = .FALSE.
         CDEN = CDEN1
      ELSE IF( ABS( CNUM1 ).GT.ABS( CDEN ) ) THEN
*
*        Pre-multiply sub( X ) by BIGNUM if CDEN is small compared to
*        CNUM.
*
         MUL = BIGNUM
         DONE = .FALSE.
         CNUM = CNUM1
      ELSE
*
*        Multiply sub( X ) by CNUM / CDEN and return.
*
         MUL = CNUM / CDEN
         DONE = .TRUE.
      END IF
*
*     Scale the vector sub( X ) by MUL
*
      CALL PDSCAL( N, MUL, SX, IX, JX, DESCX, INCX )
*
      IF( .NOT.DONE )
     $   GO TO 10
*
      RETURN
*
*     End of PDRSCL
*
      END
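*
*     The following driver is a minimal, hedged usage sketch added for
*     illustration; it is not part of ScaLAPACK.  It assumes a working
*     MPI/BLACS installation, builds a 1 x NPROCS process grid,
*     distributes an N-element row vector with block size NB, and
*     divides it by 3.0D+0 via PDRSCL.  All names below (PDRSCLEX,
*     LOCSZ, ...) are choices made for this example only.
*
      PROGRAM PDRSCLEX
      INTEGER            LOCSZ, N, NB
      PARAMETER          ( LOCSZ = 100, N = 8, NB = 2 )
      INTEGER            DESCX( 9 ), IAM, ICTXT, INFO, J, MYCOL, MYROW,
     $                   NPCOL, NPROCS, NPROW, NQ
      DOUBLE PRECISION   X( LOCSZ )
      INTEGER            NUMROC
      EXTERNAL           NUMROC
      EXTERNAL           BLACS_EXIT, BLACS_GET, BLACS_GRIDEXIT,
     $                   BLACS_GRIDINFO, BLACS_GRIDINIT, BLACS_PINFO,
     $                   DESCINIT, PDRSCL
*
*     Set up a 1 x NPROCS process grid.
*
      CALL BLACS_PINFO( IAM, NPROCS )
      CALL BLACS_GET( -1, 0, ICTXT )
      CALL BLACS_GRIDINIT( ICTXT, 'R', 1, NPROCS )
      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*
*     Descriptor for a 1 x N matrix whose single row holds the vector;
*     the local leading dimension is 1 since the global matrix has a
*     single row.
*
      CALL DESCINIT( DESCX, 1, N, 1, NB, 0, 0, ICTXT, 1, INFO )
*
*     Fill the locally owned entries of the row vector.
*
      NQ = NUMROC( N, NB, MYCOL, 0, NPCOL )
      DO 20 J = 1, NQ
         X( J ) = DBLE( J )
   20 CONTINUE
*
*     Divide sub( X ) = X(1,1:N) by 3.0 without overflow or underflow.
*     INCX = DESCX( 3 ) = M_X selects the row-vector case.
*
      CALL PDRSCL( N, 3.0D+0, X, 1, 1, DESCX, DESCX( 3 ) )
*
      CALL BLACS_GRIDEXIT( ICTXT )
      CALL BLACS_EXIT( 0 )
      END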