ScaLAPACK 2.1
ScaLAPACK: Scalable Linear Algebra PACKage
pzdbsv.f
1  SUBROUTINE pzdbsv( N, BWL, BWU, NRHS, A, JA, DESCA, B, IB, DESCB,
2  $ WORK, LWORK, INFO )
3 *
4 *
5 *
6 * -- ScaLAPACK routine (version 1.7) --
7 * University of Tennessee, Knoxville, Oak Ridge National Laboratory,
8 * and University of California, Berkeley.
9 * November 15, 1997
10 *
11 * .. Scalar Arguments ..
12  INTEGER BWL, BWU, IB, INFO, JA, LWORK, N, NRHS
13 * ..
14 * .. Array Arguments ..
15  INTEGER DESCA( * ), DESCB( * )
16  COMPLEX*16 A( * ), B( * ), WORK( * )
17 * ..
18 *
19 *
20 * Purpose
21 * =======
22 *
23 * PZDBSV solves a system of linear equations
24 *
25 * A(1:N, JA:JA+N-1) * X = B(IB:IB+N-1, 1:NRHS)
26 *
27 * where A(1:N, JA:JA+N-1) is an N-by-N complex
28 * banded diagonally dominant-like distributed
29 * matrix with bandwidth BWL, BWU.
30 *
31 * Gaussian elimination without pivoting
32 * is used to factor a reordering
33 * of the matrix into L U.
34 *
35 * See PZDBTRF and PZDBTRS for details.
36 *
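*  A single call solves the whole system; for example (a sketch using
*  this routine's own argument list):
*
*     CALL PZDBSV( N, BWL, BWU, NRHS, A, JA, DESCA, B, IB, DESCB,
*    $             WORK, LWORK, INFO )
*
*  On exit, B(IB:IB+N-1, 1:NRHS) is overwritten with the solution X
*  and A holds details of the factorization.
*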
37 * =====================================================================
38 *
39 * Arguments
40 * =========
41 *
42 *
43 * N (global input) INTEGER
44 * The number of rows and columns to be operated on, i.e. the
45 * order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
46 *
47 * BWL (global input) INTEGER
48 * Number of subdiagonals. 0 <= BWL <= N-1
49 *
50 * BWU (global input) INTEGER
51 * Number of superdiagonals. 0 <= BWU <= N-1
52 *
53 * NRHS (global input) INTEGER
54 * The number of right hand sides, i.e., the number of columns
55 * of the distributed submatrix B(IB:IB+N-1, 1:NRHS).
56 * NRHS >= 0.
57 *
58 * A (local input/local output) COMPLEX*16 pointer into
59 * local memory to an array with first dimension
60 * LLD_A >=(bwl+bwu+1) (stored in DESCA).
61 * On entry, this array contains the local pieces of the
62 * distributed matrix A(1:N, JA:JA+N-1), stored in the packed
63 * banded format used in LAPACK. Please see the Notes below and the
64 * ScaLAPACK manual for more detail on the format of
65 * distributed matrices.
66 * On exit, this array contains details of the
67 * factorization.
68 * Note that permutations are performed on the matrix, so that
69 * the factors returned are different from those returned
70 * by LAPACK.
71 *
72 * JA (global input) INTEGER
73 * The index in the global array A that points to the start of
74 * the matrix to be operated on (which may be either all of A
75 * or a submatrix of A).
76 *
77 * DESCA (global and local input) INTEGER array of dimension DLEN.
78 * if 1D type (DTYPE_A=501), DLEN >= 7;
79 * if 2D type (DTYPE_A=1), DLEN >= 9 .
80 * The array descriptor for the distributed matrix A.
81 * Contains information of mapping of A to memory. Please
82 * see NOTES below for full description and options.
83 *
84 * B (local input/local output) COMPLEX*16 pointer into
85 * local memory to an array of local leading dimension LLD_B >= NB.
86 * On entry, this array contains the local
87 * pieces of the right hand sides
88 * B(IB:IB+N-1, 1:NRHS).
89 * On exit, this array contains the local pieces of the
90 * distributed solution matrix X.
91 *
92 * IB (global input) INTEGER
93 * The row index in the global array B that points to the first
94 * row of the matrix to be operated on (which may be either
95 * all of B or a submatrix of B).
96 *
97 * DESCB (global and local input) INTEGER array of dimension DLEN.
98 * if 1D type (DTYPE_B=502), DLEN >=7;
99 * if 2D type (DTYPE_B=1), DLEN >= 9.
100 * The array descriptor for the distributed matrix B.
101 * Contains information of mapping of B to memory. Please
102 * see NOTES below for full description and options.
103 *
104 * WORK (local workspace/local output)
105 * COMPLEX*16 temporary workspace. This space may
106 * be overwritten in between calls to routines. WORK must be
107 * the size given in LWORK.
108 * On exit, WORK( 1 ) contains the minimal LWORK.
109 *
110 * LWORK (local input or global input) INTEGER
111 * Size of user-input workspace WORK.
112 * If LWORK is too small, the minimal acceptable size will be
113 * returned in WORK(1) and an error code is returned. LWORK>=
114 * NB*(bwl+bwu)+6*max(bwl,bwu)*max(bwl,bwu)
115 * +max((max(bwl,bwu)*NRHS), max(bwl,bwu)*max(bwl,bwu))
116 *
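* As a worked instance of this bound (values illustrative): with
* NB = 8, BWL = 1, BWU = 2 and NRHS = 1,
* LWORK >= 8*(1+2) + 6*2*2 + max( 2*1, 2*2 ) = 24 + 24 + 4 = 52.
*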
117 * INFO (global output) INTEGER
118 * = 0: successful exit
119 * < 0: If the i-th argument is an array and the j-th entry had
120 * an illegal value, then INFO = -(i*100+j), if the i-th
121 * argument is a scalar and had an illegal value, then
122 * INFO = -i.
123 * > 0: If INFO = K<=NPROCS, the submatrix stored on processor
124 * INFO and factored locally was not
125 * diagonally dominant-like, and
126 * the factorization was not completed.
127 * If INFO = K>NPROCS, the submatrix stored on processor
128 * INFO-NPROCS representing interactions with other
129 * processors was not
130 * stably factorable without interchanges,
131 * and the factorization was not completed.
132 *
133 * =====================================================================
134 *
135 *
136 * Restrictions
137 * ============
138 *
139 * The following are restrictions on the input parameters. Some of these
140 * are temporary and will be removed in future releases, while others
141 * may reflect fundamental technical limitations.
142 *
143 * Non-cyclic restriction: VERY IMPORTANT!
144 * P*NB>= mod(JA-1,NB)+N.
145 * The mapping for matrices must be blocked, reflecting the nature
146 * of the divide and conquer algorithm as a task-parallel algorithm.
147 * This formula in words is: no processor may have more than one
148 * chunk of the matrix.
149 *
150 * Blocksize cannot be too small:
151 * If the matrix spans more than one processor, the following
152 * restriction on NB, the size of each block on each processor,
153 * must hold:
154 * NB >= 2*MAX(BWL,BWU)
155 * The bulk of parallel computation is done on the matrix of size
156 * O(NB) on each processor. If this is too small, divide and conquer
157 * is a poor choice of algorithm.
158 *
159 * Submatrix reference:
160 * JA = IB
161 * Alignment restriction that prevents unnecessary communication.
162 *
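* A caller can verify these restrictions before the call; a sketch,
* where OK is an illustrative LOGICAL variable and NP = NPROW*NPCOL
* is the number of processes in the one-dimensional grid:
*
*        OK = ( NP*NB .GE. MOD( JA-1, NB ) + N ) .AND.
*    $        ( NP.EQ.1 .OR. NB .GE. 2*MAX( BWL, BWU ) ) .AND.
*    $        ( JA .EQ. IB )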
163 *
164 * =====================================================================
165 *
166 *
167 * Notes
168 * =====
169 *
170 * If the factorization routine and the solve routine are to be called
171 * separately (to solve various sets of righthand sides using the same
172 * coefficient matrix), the auxiliary space AF *must not be altered*
173 * between calls to the factorization routine and the solve routine.
174 *
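* For example, to factor once and then solve repeatedly (a sketch;
* AF, LAF, WORK and LWORK are the caller-supplied buffers documented
* in PZDBTRF and PZDBTRS):
*
*        CALL PZDBTRF( N, BWL, BWU, A, JA, DESCA, AF, LAF,
*    $                 WORK, LWORK, INFO )
*        CALL PZDBTRS( 'N', N, BWL, BWU, NRHS, A, JA, DESCA,
*    $                 B, IB, DESCB, AF, LAF, WORK, LWORK, INFO )
*
* The PZDBTRS call may be repeated for new right hand sides, provided
* neither A nor AF is modified between calls.
*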
175 * The best algorithm for solving banded and tridiagonal linear systems
176 * depends on a variety of parameters, especially the bandwidth.
177 * Currently, only algorithms designed for the case N/P >> bw are
178 * implemented. These go by many names, including Divide and Conquer,
179 * Partitioning, domain decomposition-type, etc.
180 *
181 * Algorithm description: Divide and Conquer
182 *
183 * The Divide and Conquer algorithm assumes the matrix is narrowly
184 * banded compared with the number of equations. In this situation,
185 * it is best to distribute the input matrix A one-dimensionally,
186 * with columns atomic and rows divided amongst the processes.
187 * The basic algorithm divides the banded matrix up into
188 * P pieces with one stored on each processor,
189 * and then proceeds in 2 phases for the factorization or 3 for the
190 * solution of a linear system.
191 * 1) Local Phase:
192 * The individual pieces are factored independently and in
193 * parallel. These factors are applied to the matrix creating
194 * fill-in, which is stored in a non-inspectable way in auxiliary
195 * space AF. Mathematically, this is equivalent to reordering
196 * the matrix A as P A P^T and then factoring the principal
197 * leading submatrix of size equal to the sum of the sizes of
198 * the matrices factored on each processor. The factors of
199 * these submatrices overwrite the corresponding parts of A
200 * in memory.
201 * 2) Reduced System Phase:
202 * A small (max(bwl,bwu)* (P-1)) system is formed representing
203 * interaction of the larger blocks, and is stored (as are its
204 * factors) in the space AF. A parallel Block Cyclic Reduction
205 * algorithm is used. For a linear system, a parallel front solve
206 * followed by an analogous backsolve, both using the structure
207 * of the factored matrix, are performed.
208 * 3) Backsubstitution Phase:
209 * For a linear system, a local backsubstitution is performed on
210 * each processor in parallel.
211 *
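* For instance, with P = 4 processes and BWL = BWU = 2, the reduced
* system formed in phase 2 has order max(BWL,BWU)*(P-1) = 2*3 = 6,
* independent of N; this is why the method is effective when
* N/P >> bw.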
212 *
213 * Descriptors
214 * ===========
215 *
216 * Descriptors now have *types* and differ from ScaLAPACK 1.0.
217 *
218 * Note: banded codes can use either the old two dimensional
219 * or new one-dimensional descriptors, though the processor grid in
220 * both cases *must be one-dimensional*. We describe both types below.
221 *
222 * Each global data object is described by an associated description
223 * vector. This vector stores the information required to establish
224 * the mapping between an object element and its corresponding process
225 * and memory location.
226 *
227 * Let A be a generic term for any 2D block cyclically distributed array.
228 * Such a global array has an associated description vector DESCA.
229 * In the following comments, the character _ should be read as
230 * "of the global array".
231 *
232 * NOTATION STORED IN EXPLANATION
233 * --------------- -------------- --------------------------------------
234 * DTYPE_A(global) DESCA( DTYPE_ ) The descriptor type. In this case,
235 * DTYPE_A = 1.
236 * CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
237 * the BLACS process grid A is distribu-
238 * ted over. The context itself is glo-
239 * bal, but the handle (the integer
240 * value) may vary.
241 * M_A (global) DESCA( M_ ) The number of rows in the global
242 * array A.
243 * N_A (global) DESCA( N_ ) The number of columns in the global
244 * array A.
245 * MB_A (global) DESCA( MB_ ) The blocking factor used to distribute
246 * the rows of the array.
247 * NB_A (global) DESCA( NB_ ) The blocking factor used to distribute
248 * the columns of the array.
249 * RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
250 * row of the array A is distributed.
251 * CSRC_A (global) DESCA( CSRC_ ) The process column over which the
252 * first column of the array A is
253 * distributed.
254 * LLD_A (local) DESCA( LLD_ ) The leading dimension of the local
255 * array. LLD_A >= MAX(1,LOCr(M_A)).
256 *
257 * Let K be the number of rows or columns of a distributed matrix,
258 * and assume that its process grid has dimension p x q.
259 * LOCr( K ) denotes the number of elements of K that a process
260 * would receive if K were distributed over the p processes of its
261 * process column.
262 * Similarly, LOCc( K ) denotes the number of elements of K that a
263 * process would receive if K were distributed over the q processes of
264 * its process row.
265 * The values of LOCr() and LOCc() may be determined via a call to the
266 * ScaLAPACK tool function, NUMROC:
267 * LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
268 * LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
269 * An upper bound for these quantities may be computed by:
270 * LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
271 * LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
272 *
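* For example, the number of columns of A held by the local process
* can be queried as follows (a sketch; NQ is an illustrative local
* variable and NB_, CSRC_ are the usual descriptor entry indices):
*
*        INTEGER            NQ, NUMROC
*        EXTERNAL           NUMROC
*        NQ = NUMROC( N, DESCA( NB_ ), MYCOL, DESCA( CSRC_ ), NPCOL )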
273 *
274 * One-dimensional descriptors:
275 *
276 * One-dimensional descriptors are a new addition to ScaLAPACK since
277 * version 1.0. They simplify and shorten the descriptor for 1D
278 * arrays.
279 *
280 * Since ScaLAPACK supports two-dimensional arrays as the fundamental
281 * object, we allow 1D arrays to be distributed either over the
282 * first dimension of the array (as if the grid were P-by-1) or the
283 * 2nd dimension (as if the grid were 1-by-P). This choice is
284 * indicated by the descriptor type (501 or 502)
285 * as described below.
286 *
287 * IMPORTANT NOTE: the actual BLACS grid represented by the
288 * CTXT entry in the descriptor may be *either* P-by-1 or 1-by-P
289 * irrespective of which one-dimensional descriptor type
290 * (501 or 502) is input.
291 * This routine will interpret the grid properly either way.
292 * ScaLAPACK routines *do not support intercontext operations* so that
293 * the grid passed to a single ScaLAPACK routine *must be the same*
294 * for all array descriptors passed to that routine.
295 *
296 * NOTE: In all cases where 1D descriptors are used, 2D descriptors
297 * may also be used, since a one-dimensional array is a special case
298 * of a two-dimensional array with one dimension of size unity.
299 * The two-dimensional array used in this case *must* be of the
300 * proper orientation:
301 * If the appropriate one-dimensional descriptor is DTYPE_A=501
302 * (1 by P type), then the two dimensional descriptor must
303 * have a CTXT value that refers to a 1 by P BLACS grid;
304 * If the appropriate one-dimensional descriptor is DTYPE_A=502
305 * (P by 1 type), then the two dimensional descriptor must
306 * have a CTXT value that refers to a P by 1 BLACS grid.
307 *
308 *
309 * Summary of allowed descriptors, types, and BLACS grids:
310 * DTYPE 501 502 1 1
311 * BLACS grid 1xP or Px1 1xP or Px1 1xP Px1
312 * -----------------------------------------------------
313 * A OK NO OK NO
314 * B NO OK NO OK
315 *
316 * Note that a consequence of this chart is that it is not possible
317 * for *both* DTYPE_A and DTYPE_B to be 2D_type(1), as these lead
318 * to opposite requirements for the orientation of the BLACS grid,
319 * and as noted before, the *same* BLACS context must be used in
320 * all descriptors in a single ScaLAPACK subroutine call.
321 *
322 * Let A be a generic term for any 1D block cyclically distributed array.
323 * Such a global array has an associated description vector DESCA.
324 * In the following comments, the character _ should be read as
325 * "of the global array".
326 *
327 * NOTATION STORED IN EXPLANATION
328 * --------------- ---------- ------------------------------------------
329 * DTYPE_A(global) DESCA( 1 ) The descriptor type. For 1D grids,
330 * TYPE_A = 501: 1-by-P grid.
331 * TYPE_A = 502: P-by-1 grid.
332 * CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
333 * the BLACS process grid A is distribu-
334 * ted over. The context itself is glo-
335 * bal, but the handle (the integer
336 * value) may vary.
337 * N_A (global) DESCA( 3 ) The size of the array dimension being
338 * distributed.
339 * NB_A (global) DESCA( 4 ) The blocking factor used to distribute
340 * the distributed dimension of the array.
341 * SRC_A (global) DESCA( 5 ) The process row or column over which the
342 * first row or column of the array
343 * is distributed.
344 * LLD_A (local) DESCA( 6 ) The leading dimension of the local array
345 * storing the local blocks of the distri-
346 * buted array A. Minimum value of LLD_A
347 * depends on TYPE_A.
348 * TYPE_A = 501: LLD_A >=
349 * max( size of undistributed dimension, 1 ).
350 * TYPE_A = 502: LLD_A >= max( NB_A, 1 ).
351 * Reserved DESCA( 7 ) Reserved for future use.
352 *
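* The following driver sketch puts the pieces above together for this
* routine. Everything in it is illustrative: the problem sizes, the
* diagonally dominant band fill and the variable names are
* assumptions, error handling is minimal, and it is intended to be
* run on at least two processes so that NPROCS*NB >= N (the
* non-cyclic restriction).
*
*        PROGRAM EXDBSV
*        INTEGER            N, NB, BWL, BWU, NRHS, LWMAX
*        PARAMETER          ( N = 16, NB = 8, BWL = 1, BWU = 2,
*       $                     NRHS = 1, LWMAX = 100 )
*        INTEGER            DESCA( 7 ), DESCB( 7 )
*        COMPLEX*16         A( (BWL+BWU+1)*NB ), B( NB*NRHS ),
*       $                   WORK( LWMAX )
*        INTEGER            ICTXT, IAM, NPROCS, NPROW, NPCOL, MYROW,
*       $                   MYCOL, INFO, I, J, JA, IB, LWORK
*        EXTERNAL           BLACS_PINFO, BLACS_GET, BLACS_GRIDINIT,
*       $                   BLACS_GRIDINFO, BLACS_GRIDEXIT,
*       $                   BLACS_EXIT, PZDBSV
*
*     Start a 1-by-NPROCS BLACS process grid:
*
*        CALL BLACS_PINFO( IAM, NPROCS )
*        CALL BLACS_GET( 0, 0, ICTXT )
*        CALL BLACS_GRIDINIT( ICTXT, 'Row-major', 1, NPROCS )
*        CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*
*     Build the 1D descriptors (type 501 for A, type 502 for B):
*
*        DESCA( 1 ) = 501
*        DESCA( 2 ) = ICTXT
*        DESCA( 3 ) = N
*        DESCA( 4 ) = NB
*        DESCA( 5 ) = 0
*        DESCA( 6 ) = BWL + BWU + 1
*        DESCA( 7 ) = 0
*        DESCB( 1 ) = 502
*        DESCB( 2 ) = ICTXT
*        DESCB( 3 ) = N
*        DESCB( 4 ) = NB
*        DESCB( 5 ) = 0
*        DESCB( 6 ) = NB
*        DESCB( 7 ) = 0
*
*     Fill the local band columns (LAPACK packed band storage, with
*     the diagonal in row BWU+1; positions outside the band are not
*     referenced) and set the right hand sides to one:
*
*        DO 20 J = 1, NB
*           DO 10 I = 1, BWL + BWU + 1
*              A( I + (J-1)*(BWL+BWU+1) ) = DCMPLX( -1.0D+0, 0.0D+0 )
*   10      CONTINUE
*           A( BWU+1 + (J-1)*(BWL+BWU+1) ) = DCMPLX( 10.0D+0, 0.0D+0 )
*   20   CONTINUE
*        DO 30 I = 1, NB*NRHS
*           B( I ) = DCMPLX( 1.0D+0, 0.0D+0 )
*   30   CONTINUE
*
*     Solve; on exit B holds the local pieces of the solution X:
*
*        JA = 1
*        IB = 1
*        LWORK = NB*(BWL+BWU) + 6*MAX(BWL,BWU)*MAX(BWL,BWU)
*       $      + MAX( MAX(BWL,BWU)*NRHS, MAX(BWL,BWU)*MAX(BWL,BWU) )
*        CALL PZDBSV( N, BWL, BWU, NRHS, A, JA, DESCA, B, IB, DESCB,
*       $             WORK, LWORK, INFO )
*        IF( IAM.EQ.0 .AND. INFO.NE.0 ) WRITE( *, * ) 'INFO = ', INFO
*
*        CALL BLACS_GRIDEXIT( ICTXT )
*        CALL BLACS_EXIT( 0 )
*        END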
353 *
354 *
355 * =====================================================================
356 *
357 * Code Developer: Andrew J. Cleary, University of Tennessee.
358 * Current address: Lawrence Livermore National Labs.
359 * This version released: August, 2001.
360 *
361 * =====================================================================
362 *
363 * ..
364 * .. Parameters ..
365  DOUBLE PRECISION ONE, ZERO
366  parameter( one = 1.0d+0 )
367  parameter( zero = 0.0d+0 )
368  COMPLEX*16 CONE, CZERO
369  parameter( cone = ( 1.0d+0, 0.0d+0 ) )
370  parameter( czero = ( 0.0d+0, 0.0d+0 ) )
371  INTEGER INT_ONE
372  parameter( int_one = 1 )
373  INTEGER DESCMULT, BIGNUM
374  parameter(descmult = 100, bignum = descmult * descmult)
375  INTEGER BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
376  $ lld_, mb_, m_, nb_, n_, rsrc_
377  parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
378  $ ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
379  $ rsrc_ = 7, csrc_ = 8, lld_ = 9 )
380 * ..
381 * .. Local Scalars ..
382  INTEGER ICTXT, MYCOL, MYROW, NB, NPCOL, NPROW,
383  $ ws_factor
384 * ..
385 * .. External Subroutines ..
386  EXTERNAL pxerbla, pzdbtrf, pzdbtrs
387 * ..
388 * .. Executable Statements ..
389 *
390 * Note: to avoid duplication, most error checking is not performed
391 * in this routine and is left to routines
392 * PZDBTRF and PZDBTRS.
393 *
394 * Begin main code
395 *
396  info = 0
397 *
398 * Get block size to calculate workspace requirements
399 *
400  IF( desca( dtype_ ) .EQ. block_cyclic_2d ) THEN
401  nb = desca( nb_ )
402  ictxt = desca( ctxt_ )
403  ELSEIF( desca( dtype_ ) .EQ. 501 ) THEN
404  nb = desca( 4 )
405  ictxt = desca( 2 )
406  ELSE
407  info = -( 7*100 + dtype_ )
408  CALL pxerbla( ictxt,
409  $ 'PZDBSV',
410  $ -info )
411  RETURN
412  ENDIF
413 *
414  CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
415 *
416 *
417 * Size needed for AF in factorization
418 *
419  ws_factor = nb*(bwl+bwu)+6*max(bwl,bwu)*max(bwl,bwu)
420 *
421 * Factor the matrix
422 *
423  CALL pzdbtrf( n, bwl, bwu, a, ja, desca, work,
424  $ min( lwork, ws_factor ), work( 1+ws_factor ),
425  $ lwork-ws_factor, info )
426 *
427 * Check info for error conditions
428 *
429  IF( info.NE.0 ) THEN
430  IF( info .LT. 0 ) THEN
431  CALL pxerbla( ictxt, 'PZDBSV', -info )
432  ENDIF
433  RETURN
434  END IF
435 *
436 * Solve the system using the factorization
437 *
438  CALL pzdbtrs( 'N', n, bwl, bwu, nrhs, a, ja, desca, b, ib, descb,
439  $ work, min( lwork, ws_factor ), work( 1+ws_factor),
440  $ lwork-ws_factor, info )
441 *
442 * Check info for error conditions
443 *
444  IF( info.NE.0 ) THEN
445  CALL pxerbla( ictxt, 'PZDBSV', -info )
446  RETURN
447  END IF
448 *
449  RETURN
450 *
451 * End of PZDBSV
452 *
453  END