ScaLAPACK 2.1: Scalable Linear Algebra PACKage
pdsepchk.f
*
*
      SUBROUTINE PDSEPCHK( MS, NV, A, IA, JA, DESCA, EPSNORMA, THRESH,
     $                     Q, IQ, JQ, DESCQ, C, IC, JC, DESCC, W, WORK,
     $                     LWORK, TSTNRM, RESULT )
*
*  -- ScaLAPACK routine (version 2.0.2) --
*     Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver
*     May 1 2012
*
*     .. Scalar Arguments ..
      INTEGER            IA, IC, IQ, JA, JC, JQ, LWORK, MS, NV, RESULT
      DOUBLE PRECISION   EPSNORMA, THRESH, TSTNRM
*     ..
*     .. Array Arguments ..
*
      INTEGER            DESCA( * ), DESCC( * ), DESCQ( * )
      DOUBLE PRECISION   A( * ), C( * ), Q( * ), W( * ), WORK( * )
*     ..
*
*  Purpose
*  =======
*
*  Compute |AQ - QL| / (EPSNORMA * MS), the scaled residual of the
*  eigendecomposition A*Q = Q*L with L = diag(W),
*  where EPSNORMA = (abstol + eps)*norm(A) when called by PDSQPSUBTST.
*
*  Notes
*  =====
*
*
*  Each global data object is described by an associated description
*  vector.  This vector stores the information required to establish
*  the mapping between an object element and its corresponding process
*  and memory location.
*
*  Let A be a generic term for any 2D block cyclically distributed
*  array.  Such a global array has an associated description vector
*  DESCA.  In the following comments, the character _ should be read
*  as "of the global array".
*
*  NOTATION        STORED IN      EXPLANATION
*  --------------- -------------- --------------------------------------
*  DTYPE_A(global) DESCA( DTYPE_ )The descriptor type.  In this case,
*                                 DTYPE_A = 1.
*  CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
*                                 the BLACS process grid A is distribu-
*                                 ted over. The context itself is glo-
*                                 bal, but the handle (the integer
*                                 value) may vary.
*  M_A    (global) DESCA( M_ )    The number of rows in the global
*                                 array A.
*  N_A    (global) DESCA( N_ )    The number of columns in the global
*                                 array A.
*  MB_A   (global) DESCA( MB_ )   The blocking factor used to distribute
*                                 the rows of the array.
*  NB_A   (global) DESCA( NB_ )   The blocking factor used to distribute
*                                 the columns of the array.
*  RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
*                                 row of the array A is distributed.
*  CSRC_A (global) DESCA( CSRC_ ) The process column over which the
*                                 first column of the array A is
*                                 distributed.
*  LLD_A  (local)  DESCA( LLD_ )  The leading dimension of the local
*                                 array.  LLD_A >= MAX(1,LOCr(M_A)).
*
*  Let K be the number of rows or columns of a distributed matrix,
*  and assume that its process grid has dimension p x q.
*  LOCr( K ) denotes the number of elements of K that a process
*  would receive if K were distributed over the p processes of its
*  process column.
*  Similarly, LOCc( K ) denotes the number of elements of K that a
*  process would receive if K were distributed over the q processes of
*  its process row.
*  The values of LOCr() and LOCc() may be determined via a call to the
*  ScaLAPACK tool function, NUMROC:
*          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
*          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
*  An upper bound for these quantities may be computed by:
*          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
*          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
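*
*  As an illustrative sketch (not part of this routine), a descriptor
*  for an M-by-N matrix with row and column blocking factors MB and
*  NB, whose first row and column reside on process (0,0), is
*  typically built with the ScaLAPACK tool routine DESCINIT:
*
*     LLDA = MAX( 1, NUMROC( M, MB, MYROW, 0, NPROW ) )
*     CALL DESCINIT( DESCA, M, N, MB, NB, 0, 0, ICTXT, LLDA, INFO )
*
*  where ICTXT is the BLACS context handle and MYROW and NPROW are
*  returned by BLACS_GRIDINFO.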
*
*
*  Arguments
*  =========
*
*     MP = number of local rows in A, C and Q
*     MQ = number of local columns in A
*     NQ = number of local columns in C and Q
*
*  MS      (global input) INTEGER
*          Matrix size.
*          The number of global rows in A, C and Q.
*          Also, the number of global columns in A.
*
*  NV      (global input) INTEGER
*          Number of eigenvectors.
*          The number of global columns in C and Q.
*
*  A       (local input) DOUBLE PRECISION pointer to an
*          array in local memory of dimension (LLD_A, LOCc(JA+MS-1)).
*          This array contains the local pieces of the MS-by-MS
*          distributed test matrix A.
*
*  IA      (global input) INTEGER
*          A's global row index, which points to the beginning of the
*          submatrix which is to be operated on.
*
*  JA      (global input) INTEGER
*          A's global column index, which points to the beginning of
*          the submatrix which is to be operated on.
*
*  DESCA   (global and local input) INTEGER array of dimension DLEN_.
*          The array descriptor for the distributed matrix A.
*
*  EPSNORMA (input) DOUBLE PRECISION
*          abstol + eps * inf.norm(A)
*          abstol is the absolute tolerance for the eigenvalues and is
*          set in the calling routines, PDSEPSUBTST and PDSQPSUBTST.
*
*  THRESH  (input) DOUBLE PRECISION
*          A test will count as "failed" if the "error", computed as
*          described below, exceeds THRESH.  Note that the error
*          is scaled to be O(1), so THRESH should be a reasonably
*          small multiple of 1, e.g., 10 or 100.  In particular,
*          it should not depend on the precision (single vs. double)
*          or the size of the matrix.  It must be at least zero.
*
*  Q       (local input) DOUBLE PRECISION array
*          global dimension (MS, NV), local dimension (DESCQ(LLD_), NQ)
*
*          Contains the eigenvectors as computed by PDSYEVX.
*
*  IQ      (global input) INTEGER
*          Q's global row index, which points to the beginning of the
*          submatrix which is to be operated on.
*
*  JQ      (global input) INTEGER
*          Q's global column index, which points to the beginning of
*          the submatrix which is to be operated on.
*
*  DESCQ   (global and local input) INTEGER array of dimension DLEN_.
*          The array descriptor for the distributed matrix Q.
*
*  C       (local workspace) DOUBLE PRECISION array,
*          global dimension (MS, NV), local dimension (DESCC(LLD_), NQ)
*
*          Accumulator for computing AQ - QL
*
*  IC      (global input) INTEGER
*          C's global row index, which points to the beginning of the
*          submatrix which is to be operated on.
*
*  JC      (global input) INTEGER
*          C's global column index, which points to the beginning of
*          the submatrix which is to be operated on.
*
*  DESCC   (global and local input) INTEGER array of dimension DLEN_.
*          The array descriptor for the distributed matrix C.
*
*  W       (global input) DOUBLE PRECISION array, dimension (NV)
*
*          Contains the computed eigenvalues.
*
*  WORK    (local workspace) DOUBLE PRECISION array,
*          dimension (LWORK)
*
*  LWORK   (local input) INTEGER
*          The length of the array WORK.
*          LWORK >= NUMROC( NV, DESCA( NB_ ), MYCOL, 0, NPCOL )
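*
*          As an illustrative sketch, a caller could size WORK with
*          NUMROC (MYCOL and NPCOL as returned by BLACS_GRIDINFO):
*
*             NQ    = NUMROC( NV, DESCA( NB_ ), MYCOL, 0, NPCOL )
*             LWORK = MAX( 1, NQ )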
*
*  TSTNRM  (global output) DOUBLE PRECISION
*          |AQ - QL| / ( EPSNORMA * MS )
*
*  RESULT  (global output) INTEGER
*          0 if the test passes, i.e.
*             |AQ - QL| / ( abstol + eps * norm(A) ) <= MS * THRESH
*          1 if the test fails, i.e.
*             |AQ - QL| / ( abstol + eps * norm(A) ) > MS * THRESH
*
*     .. Local Scalars ..
*
      INTEGER            INFO, J, LOCALCOL, MP, MYCOL, MYROW, NPCOL,
     $                   NPROW, NQ, PCOL
      DOUBLE PRECISION   NORM
*     ..
*     .. Parameters ..
      INTEGER            BLOCK_CYCLIC_2D, DLEN_, DTYPE_, CTXT_, M_, N_,
     $                   MB_, NB_, RSRC_, CSRC_, LLD_
      PARAMETER          ( BLOCK_CYCLIC_2D = 1, DLEN_ = 9, DTYPE_ = 1,
     $                   CTXT_ = 2, M_ = 3, N_ = 4, MB_ = 5, NB_ = 6,
     $                   RSRC_ = 7, CSRC_ = 8, LLD_ = 9 )
      DOUBLE PRECISION   ONE, NEGONE
      PARAMETER          ( ONE = 1.0D+0, NEGONE = -1.0D+0 )
*     ..
*     .. External Functions ..
      INTEGER            INDXG2L, INDXG2P, NUMROC
      DOUBLE PRECISION   PDLANGE
      EXTERNAL           INDXG2L, INDXG2P, NUMROC, PDLANGE
*     ..
*     .. External Subroutines ..
      EXTERNAL           BLACS_GRIDINFO, CHK1MAT, DLACPY, DSCAL,
     $                   PDGEMM, PXERBLA
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          MAX
*     ..
*     .. Executable Statements ..
*     This is just to keep ftnchek happy
      IF( BLOCK_CYCLIC_2D*CSRC_*CTXT_*DLEN_*DTYPE_*LLD_*MB_*M_*NB_*N_*
     $    RSRC_.LT.0 )RETURN
*
      RESULT = 0
*
      CALL BLACS_GRIDINFO( DESCA( CTXT_ ), NPROW, NPCOL, MYROW, MYCOL )
*
      INFO = 0
      CALL CHK1MAT( MS, 1, MS, 1, IA, JA, DESCA, 6, INFO )
      CALL CHK1MAT( MS, 1, NV, 2, IQ, JQ, DESCQ, 12, INFO )
      CALL CHK1MAT( MS, 1, NV, 2, IC, JC, DESCC, 16, INFO )
*
      IF( INFO.EQ.0 ) THEN
*
         MP = NUMROC( MS, DESCA( MB_ ), MYROW, 0, NPROW )
         NQ = NUMROC( NV, DESCA( NB_ ), MYCOL, 0, NPCOL )
*
         IF( IQ.NE.1 ) THEN
            INFO = -10
         ELSE IF( JQ.NE.1 ) THEN
            INFO = -11
         ELSE IF( IA.NE.1 ) THEN
            INFO = -4
         ELSE IF( JA.NE.1 ) THEN
            INFO = -5
         ELSE IF( IC.NE.1 ) THEN
            INFO = -14
         ELSE IF( JC.NE.1 ) THEN
            INFO = -15
         ELSE IF( LWORK.LT.NQ ) THEN
            INFO = -19
         END IF
      END IF
*
      IF( INFO.NE.0 ) THEN
         CALL PXERBLA( DESCA( CTXT_ ), 'PDSEPCHK', -INFO )
         RETURN
      END IF
*
*     C = Q * diag(W):  copy Q into C, then scale each column of C
*     by the corresponding eigenvalue
*
      CALL DLACPY( 'A', MP, NQ, Q, DESCQ( LLD_ ), C, DESCC( LLD_ ) )
*
*
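*     For each global column J, the ScaLAPACK tool functions INDXG2P
*     and INDXG2L give the process column owning J and its local
*     column index there; only the owning process column scales that
*     column of C by the eigenvalue W( J ).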
      DO 10 J = 1, NV
         PCOL = INDXG2P( J, DESCC( NB_ ), 0, 0, NPCOL )
         LOCALCOL = INDXG2L( J, DESCC( NB_ ), 0, 0, NPCOL )
*
         IF( MYCOL.EQ.PCOL ) THEN
            CALL DSCAL( MP, W( J ), C( ( LOCALCOL-1 )*DESCC( LLD_ )+1 ),
     $                  1 )
         END IF
   10 CONTINUE
*
*
*     C = C - A * Q
*
      CALL PDGEMM( 'N', 'N', MS, NV, MS, NEGONE, A, 1, 1, DESCA, Q, 1,
     $             1, DESCQ, ONE, C, 1, 1, DESCC )
*
*     Compute the norm of C
*
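*     The 'M' argument to PDLANGE requests the largest absolute
*     entry, max( abs( C(i,j) ) ), of the distributed matrix C
*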
      NORM = PDLANGE( 'M', MS, NV, C, 1, 1, DESCC, WORK )
*
*
      TSTNRM = NORM / EPSNORMA / MAX( MS, 1 )
*
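*     Fail if the scaled residual exceeds THRESH, or if TSTNRM is
*     not a number ( TSTNRM-TSTNRM .NE. 0 only for NaN or infinity )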
      IF( TSTNRM.GT.THRESH .OR. ( TSTNRM-TSTNRM.NE.0.0D0 ) ) THEN
         RESULT = 1
      END IF
*
*
      RETURN
*
*     End of PDSEPCHK
*
      END