ScaLAPACK 2.1
ScaLAPACK: Scalable Linear Algebra PACKage
pzdtlaschk.f
1  SUBROUTINE pzdtlaschk( SYMM, UPLO, TRANS, N, BWL, BWU, NRHS, X,
2  $ IX, JX, DESCX, IASEED, A, IA, JA, DESCA,
3  $ IBSEED, ANORM, RESID, WORK, WORKSIZ )
4 *
5 *
6 * -- ScaLAPACK routine (version 1.7) --
7 * University of Tennessee, Knoxville, Oak Ridge National Laboratory,
8 * and University of California, Berkeley.
9 * November 15, 1997
10 *
11 * .. Scalar Arguments ..
12  CHARACTER SYMM, TRANS, UPLO
13  INTEGER BWL, BWU, IA, IASEED, IBSEED,
14  $ ix, ja, jx, n, nrhs, worksiz
15  DOUBLE PRECISION ANORM, RESID
16 * ..
17 * .. Array Arguments ..
18  INTEGER DESCA( * ), DESCX( * )
19  COMPLEX*16 A( * ), WORK( * ), X( * )
20 * .. External Functions ..
21  LOGICAL LSAME
22 * ..
23 *
24 * Purpose
25 * =======
26 *
27 * PZDTLASCHK computes the residual
28 * || sub( A )*sub( X ) - B || / (|| sub( A ) ||*|| sub( X ) ||*eps*N)
29 * to check the accuracy of the factorization and solve steps in the
30 * LU and Cholesky decompositions, where sub( A ) denotes
31 * A(IA:IA+N-1,JA:JA+N-1), sub( X ) denotes X(IX:IX+N-1, JX:JX+NRHS-1).
32 *
33 * Notes
34 * =====
35 *
36 * Each global data object is described by an associated description
37 * vector. This vector stores the information required to establish
38 * the mapping between an object element and its corresponding process
39 * and memory location.
40 *
41 * Let A be a generic term for any 2D block cyclically distributed array.
42 * Such a global array has an associated description vector DESCA.
43 * In the following comments, the character _ should be read as
44 * "of the global array".
45 *
46 * NOTATION STORED IN EXPLANATION
47 * --------------- -------------- --------------------------------------
48 * DTYPE_A (global) DESCA( DTYPE_ ) The descriptor type. In this case,
49 * DTYPE_A = 1.
50 * CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
51 * the BLACS process grid A is distribu-
52 * ted over. The context itself is glo-
53 * bal, but the handle (the integer
54 * value) may vary.
55 * M_A (global) DESCA( M_ ) The number of rows in the global
56 * array A.
57 * N_A (global) DESCA( N_ ) The number of columns in the global
58 * array A.
59 * MB_A (global) DESCA( MB_ ) The blocking factor used to distribute
60 * the rows of the array.
61 * NB_A (global) DESCA( NB_ ) The blocking factor used to distribute
62 * the columns of the array.
63 * RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
64 * row of the array A is distributed.
65 * CSRC_A (global) DESCA( CSRC_ ) The process column over which the
66 * first column of the array A is
67 * distributed.
68 * LLD_A (local) DESCA( LLD_ ) The leading dimension of the local
69 * array. LLD_A >= MAX(1,LOCr(M_A)).
70 *
71 * Let K be the number of rows or columns of a distributed matrix,
72 * and assume that its process grid has dimension p x q.
73 * LOCr( K ) denotes the number of elements of K that a process
74 * would receive if K were distributed over the p processes of its
75 * process column.
76 * Similarly, LOCc( K ) denotes the number of elements of K that a
77 * process would receive if K were distributed over the q processes of
78 * its process row.
79 * The values of LOCr() and LOCc() may be determined via a call to the
80 * ScaLAPACK tool function, NUMROC:
81 * LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
82 * LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
83 * An upper bound for these quantities may be computed by:
84 * LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
85 * LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
86 *
87 * Arguments
88 * =========
89 *
90 * SYMM (global input) CHARACTER
91 * if SYMM = 'H', sub( A ) is a hermitian distributed band
92 * matrix, otherwise sub( A ) is a general distributed matrix.
93 *
94 * UPLO (global input) CHARACTER
95 * if SYMM = 'H', then
96 * if UPLO = 'L', the lower half of the matrix is stored
97 * if UPLO = 'U', the upper half of the matrix is stored
98 * if SYMM != 'H', then
99 * if UPLO = 'D', the matrix is stable during factorization
100 * without interchanges
101 * if UPLO != 'D', the matrix is general
102 *
103 * TRANS (global input) CHARACTER: if TRANS = 'C', the conjugate
104 * transpose of sub( A ) is the coefficient matrix in the solve.
105 *
106 * N (global input) INTEGER
107 * The number of columns to be operated on, i.e. the number of
108 * columns of the distributed submatrix sub( A ). N >= 0.
109 *
110 * NRHS (global input) INTEGER
111 * The number of right-hand sides, i.e., the number of columns
112 * of the distributed matrix sub( X ). NRHS >= 1.
113 *
114 * X (local input) COMPLEX*16 pointer into the local memory
115 * to an array of dimension (LLD_X,LOCc(JX+NRHS-1)). This array
116 * contains the local pieces of the answer vector(s) sub( X ) of
117 * sub( A )*sub( X ) = B, split up over a column of processes.
118 *
119 * IX (global input) INTEGER
120 * The row index in the global array X indicating the first
121 * row of sub( X ).
122 *
123 * DESCX (global and local input) INTEGER array of dimension DLEN_.
124 * The array descriptor for the distributed matrix X.
125 *
126 * IASEED (global input) INTEGER
127 * The seed number to generate the original matrix Ao.
128 *
129 * JA (global input) INTEGER
130 * The column index in the global array A indicating the
131 * first column of sub( A ).
132 *
133 * DESCA (global and local input) INTEGER array of dimension DLEN_.
134 * The array descriptor for the distributed matrix A.
135 *
136 * IBSEED (global input) INTEGER
137 * The seed number to generate the original matrix B.
138 *
139 * ANORM (global input) DOUBLE PRECISION
140 * The 1-norm or infinity norm of the distributed matrix
141 * sub( A ).
142 *
143 * RESID (global output) DOUBLE PRECISION
144 * The residual error:
145 * ||sub( A )*sub( X )-B|| / (||sub( A )||*||sub( X )||*eps*N).
146 *
147 * WORK (local workspace) COMPLEX*16 array, dimension (LWORK)
148 * IF SYMM='H'
149 * LWORK >= max(5,NB)+2*NB
150 * IF SYMM!='H'
151 * LWORK >= max(5,NB)+2*NB
152 *
153 * WORKSIZ (local input) INTEGER, the size of the array WORK.
154 *
155 * =====================================================================
156 *
157 * Code Developer: Andrew J. Cleary, University of Tennessee.
158 * Current address: Lawrence Livermore National Labs.
159 * This version released: August, 2001.
160 *
161 * =====================================================================
162 *
163 * .. Parameters ..
164  COMPLEX*16 ZERO, ONE
165  PARAMETER ( ONE = ( 1.0d+0, 0.0d+0 ),
166  $ zero = ( 0.0d+0, 0.0d+0 ) )
167  INTEGER BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
168  $ LLD_, MB_, M_, NB_, N_, RSRC_
169  parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
170  $ ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
171  $ rsrc_ = 7, csrc_ = 8, lld_ = 9 )
172  INTEGER INT_ONE
173  PARAMETER ( INT_ONE = 1 )
174 * ..
175 * .. Local Scalars ..
176  INTEGER IACOL, IAROW, ICTXT,
177  $ IIA, IIX, IPB, IPW,
178  $ ixcol, ixrow, j, jja, jjx, lda,
179  $ mycol, myrow, nb, np, npcol, nprow, nq
180  INTEGER I, START
181  INTEGER BW, INFO, IPPRODUCT, WORK_MIN
182  DOUBLE PRECISION DIVISOR, EPS, RESID1, NORMX
183 * ..
184 * .. Local Arrays ..
185 * ..
186 * .. External Subroutines ..
187  EXTERNAL blacs_gridinfo, dgebr2d, dgebs2d,
188  $ dgerv2d, dgesd2d, pbztran,
189  $ pzmatgen, zgamx2d, zgemm, zgsum2d,
190  $ zlaset
191 * ..
192 * .. External Functions ..
193  INTEGER IZAMAX, NUMROC
194  DOUBLE PRECISION PDLAMCH
195  EXTERNAL izamax, numroc, pdlamch
196 * ..
197 * .. Intrinsic Functions ..
198  INTRINSIC abs, dble, max, min, mod
199 * ..
200 * .. Executable Statements ..
201 *
202 * Get needed initial parameters
203 *
204  ictxt = desca( ctxt_ )
205  nb = desca( nb_ )
206 *
207  IF( lsame( symm, 'H' ) ) THEN
208  bw = bwl
209  start = 1
210  work_min = max(5,nb)+2*nb
211  ELSE
212  bw = max(bwl, bwu)
213  IF( lsame( uplo, 'D' )) THEN
214  start = 1
215  ELSE
216  start = 2
217  ENDIF
218  work_min = max(5,nb)+2*nb
219  ENDIF
220 *
221  IF ( worksiz .LT. work_min ) THEN
222  CALL pxerbla( ictxt, 'PZDTLASCHK', -21 )
223  RETURN
224  END IF
225 *
226  CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
227 *
228  eps = pdlamch( ictxt, 'eps' )
229  resid = 0.0d+0
230  divisor = anorm * eps * dble( n )
231 *
232  CALL infog2l( ia, ja, desca, nprow, npcol, myrow, mycol, iia, jja,
233  $ iarow, iacol )
234  CALL infog2l( ix, jx, descx, nprow, npcol, myrow, mycol, iix, jjx,
235  $ ixrow, ixcol )
236  np = numroc( (3), desca( mb_ ), myrow, 0, nprow )
237  nq = numroc( n, desca( nb_ ), mycol, 0, npcol )
238 *
239  ipb = 1
240  ipproduct = 1 + desca( nb_ )
241  ipw = 1 + 2*desca( nb_ )
242 *
243  lda = desca( lld_ )
244 *
245 * Regenerate A
246 *
247  IF( lsame( symm, 'H' )) THEN
248  CALL pzbmatgen( ictxt, uplo, 'D', bw, bw, n, bw+1,
249  $ desca( nb_ ), a, desca( lld_ ), 0, 0,
250  $ iaseed, myrow, mycol, nprow, npcol )
251  ELSE
252 *
253  CALL pzbmatgen( ictxt, 'N', uplo, bwl, bwu, n,
254  $ desca( mb_ ), desca( nb_ ), a,
255  $ desca( lld_ ), 0, 0, iaseed, myrow,
256  $ mycol, nprow, npcol )
257  ENDIF
258 *
259 * Matrix formed above has the diagonals shifted from what was
260 * input to the tridiagonal routine. Shift them back.
261 *
262 * Send elements to neighboring processors
263 *
264  IF( mycol.GT.0 ) THEN
265  CALL zgesd2d( ictxt, 1, 1, a( start+2), lda,
266  $ myrow, mycol-1 )
267  ENDIF
268 *
269  IF( mycol.LT.npcol-1 ) THEN
270  CALL zgesd2d( ictxt, 1, 1,
271  $ a( start+( desca( nb_ )-1 )*lda ),
272  $ lda, myrow, mycol+1 )
273  ENDIF
274 *
275 * Shift local elements
276 *
277  DO 220 i=0,desca( nb_ )-1
278  a( start+2+(i)*lda ) = a( start+2+(i+1)*lda )
279  220 CONTINUE
280 *
281  DO 230 i=desca( nb_ )-1,0,-1
282  a( start+(i+1)*lda ) = a( start+(i)*lda )
283  230 CONTINUE
284 *
285 * Receive elements from neighboring processors
286 *
287  IF( mycol.LT.npcol-1 ) THEN
288  CALL zgerv2d( ictxt, 1, 1,
289  $ a( start+2+( desca( nb_ )-1 )*lda ),
290  $ lda, myrow, mycol+1 )
291  ENDIF
292 *
293  IF( mycol.GT.0 ) THEN
294  CALL zgerv2d( ictxt, 1, 1, a( start), lda,
295  $ myrow, mycol-1 )
296  ENDIF
297 *
298 * Loop over the rhs
299 *
300  resid = 0.0
301 *
302  DO 40 j = 1, nrhs
303 *
304 * Multiply A * current column of X
305 *
306 *
307  CALL pzgbdcmv( bwl+bwu+1, bwl, bwu, trans, n, a, 1, desca,
308  $ 1, x( 1 + (j-1)*descx( lld_ )), 1, descx,
309  $ work( ipproduct ), work( ipw ),
310  $ (int_one+2)*int_one, info )
311 *
312 *
313 * Regenerate column of B
314 *
315  CALL pzmatgen( descx( ctxt_ ), 'No', 'No', descx( m_ ),
316  $ descx( n_ ), descx( mb_ ), descx( nb_ ),
317  $ work( ipb ), descx( lld_ ), descx( rsrc_ ),
318  $ descx( csrc_ ), ibseed, 0, nq, j-1, 1, mycol,
319  $ myrow, npcol, nprow )
320 *
321 * Figure || A * X - B || & || X ||
322 *
323  CALL pzaxpy( n, -one, work( ipproduct ), 1, 1, descx, 1,
324  $ work( ipb ), 1, 1, descx, 1 )
325 *
326  CALL pdznrm2( n, normx,
327  $ x, 1, j, descx, 1 )
328 *
329  CALL pdznrm2( n, resid1,
330  $ work( ipb ), 1, 1, descx, 1 )
331 *
332 *
333 * Calculate residual = ||Ax-b|| / (||x||*||A||*eps*N)
334 *
335  resid1 = resid1 / ( normx*divisor )
336 *
337  resid = max( resid, resid1 )
338 *
339  40 CONTINUE
340 *
341  RETURN
342 *
343 * End of PZDTLASCHK
344 *
345  END
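
The scaled residual defined in the Purpose section can be illustrated with a short serial program. The sketch below is not part of the file above: the tridiagonal entries, the perturbation that stands in for a computed solution, and the hard-coded machine epsilon are made-up values, and plain loops take the place of the distributed kernels (PZGBDCMV, PDZNRM2, PDLAMCH) that PZDTLASCHK actually calls.

      PROGRAM RESCHK
*     Serial sketch of the scaled residual check (illustrative values)
      INTEGER            N, I
      PARAMETER          ( N = 5 )
      DOUBLE PRECISION   EPS, ANORM, XNORM, RNORM, RESID
      COMPLEX*16         DL( N ), D( N ), DU( N ), X( N ), B( N ),
     $                   R( N )
*     Build a constant, diagonally dominant tridiagonal A and exact X
      DO 10 I = 1, N
         DL( I ) = ( 1.0D+0, 0.0D+0 )
         D( I )  = ( 4.0D+0, 1.0D+0 )
         DU( I ) = ( 1.0D+0, 0.0D+0 )
         X( I )  = DCMPLX( DBLE( I ), -DBLE( I ) )
   10 CONTINUE
*     B = A*X  (DL(1) and DU(N) are never referenced)
      DO 20 I = 1, N
         B( I ) = D( I )*X( I )
         IF( I.GT.1 ) B( I ) = B( I ) + DL( I )*X( I-1 )
         IF( I.LT.N ) B( I ) = B( I ) + DU( I )*X( I+1 )
   20 CONTINUE
*     Perturb X so it plays the role of a computed solution
      X( 1 ) = X( 1 ) + ( 1.0D-14, 0.0D+0 )
*     R = A*X - B, plus the 2-norms used in the check
      RNORM = 0.0D+0
      XNORM = 0.0D+0
      DO 30 I = 1, N
         R( I ) = D( I )*X( I ) - B( I )
         IF( I.GT.1 ) R( I ) = R( I ) + DL( I )*X( I-1 )
         IF( I.LT.N ) R( I ) = R( I ) + DU( I )*X( I+1 )
         RNORM = RNORM + DBLE( R( I )*DCONJG( R( I ) ) )
         XNORM = XNORM + DBLE( X( I )*DCONJG( X( I ) ) )
   30 CONTINUE
      RNORM = SQRT( RNORM )
      XNORM = SQRT( XNORM )
*     Infinity norm of A: every interior row has the same sum here
      ANORM = ABS( DL( 2 ) ) + ABS( D( 2 ) ) + ABS( DU( 2 ) )
*     Unit roundoff for IEEE double (PDLAMCH( ICTXT, 'eps' ) in the
*     real routine)
      EPS   = 2.220446D-16
      RESID = RNORM / ( ANORM*XNORM*EPS*DBLE( N ) )
      WRITE( *, * ) 'scaled residual =', RESID
      END

Test drivers normally compare such a value against a small threshold; a result of order one means the computed solution is about as accurate as backward stability and the size of ||A||*||X|| allow.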
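The Notes above describe the nine-element descriptor attached to every block-cyclically distributed matrix. As a rough sketch of how such a descriptor is usually built, the fragment below creates a BLACS process grid and calls the ScaLAPACK tool routine DESCINIT; the grid shape, matrix size and block sizes are invented for illustration, and the program has to be linked against BLACS/ScaLAPACK and launched on at least NPROW*NPCOL processes.

      PROGRAM DESCEX
*     Illustrative sketch: build a BLACS grid and a block-cyclic
*     descriptor of the kind described in the Notes (made-up sizes)
      INTEGER            DLEN_
      PARAMETER          ( DLEN_ = 9 )
      INTEGER            DESCA( DLEN_ )
      INTEGER            ICTXT, NPROW, NPCOL, MYROW, MYCOL,
     $                   M, N, MB, NB, LLDA, INFO
      INTEGER            NUMROC
      EXTERNAL           NUMROC
*
      NPROW = 2
      NPCOL = 2
      M     = 100
      N     = 100
      MB    = 16
      NB    = 16
*     Create a 2 x 2 process grid
      CALL BLACS_GET( -1, 0, ICTXT )
      CALL BLACS_GRIDINIT( ICTXT, 'Row-major', NPROW, NPCOL )
      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*     Local leading dimension:  LLD_A >= MAX( 1, LOCr(M_A) )
      LLDA = MAX( 1, NUMROC( M, MB, MYROW, 0, NPROW ) )
*     Fill DESCA( 1:DLEN_ ) = ( DTYPE, CTXT, M, N, MB, NB, RSRC,
*     CSRC, LLD ), with the first row/column held by process (0,0)
      CALL DESCINIT( DESCA, M, N, MB, NB, 0, 0, ICTXT, LLDA, INFO )
      IF( INFO.NE.0 ) WRITE( *, * ) 'DESCINIT returned INFO =', INFO
      IF( MYROW.EQ.0 .AND. MYCOL.EQ.0 ) WRITE( *, * ) 'DESCA =', DESCA
*
      CALL BLACS_GRIDEXIT( ICTXT )
      CALL BLACS_EXIT( 0 )
      END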
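The LOCr/LOCc counts and their ceiling-based upper bound from the Notes can be checked with a few serial lines as well. In this sketch the values of M, MB and NPROW are made up, and NUMROC is assumed to come from ScaLAPACK's TOOLS library.

      PROGRAM LOCSZ
*     Serial sketch: exact LOCr counts from NUMROC versus the bound
*     ceil( ceil(M/MB)/NPROW )*MB (illustrative values only)
      INTEGER            NUMROC
      EXTERNAL           NUMROC
      INTEGER            M, MB, NPROW, P, LOCR, BOUND
      PARAMETER          ( M = 1000, MB = 64, NPROW = 4 )
*     Integer form of  ceil( ceil(M/MB)/NPROW )*MB
      BOUND = ( ( M+MB-1 ) / MB + NPROW - 1 ) / NPROW * MB
      DO 10 P = 0, NPROW - 1
*        Rows owned by process row P when RSRC_A = 0
         LOCR = NUMROC( M, MB, P, 0, NPROW )
         WRITE( *, * ) 'process row', P, ':  LOCr =', LOCR
   10 CONTINUE
      WRITE( *, * ) 'upper bound for LOCr   =', BOUND
      END

For M = 1000, MB = 64 and NPROW = 4 this prints 256, 256, 256 and 232 for the four process rows, against the bound ceil(ceil(1000/64)/4)*64 = 256.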