ScaLAPACK 2.1
ScaLAPACK: Scalable Linear Algebra PACKage
pzdbtrs.f
      SUBROUTINE PZDBTRS( TRANS, N, BWL, BWU, NRHS, A, JA, DESCA, B,
     $                    IB, DESCB, AF, LAF, WORK, LWORK, INFO )
*
*
*
*  -- ScaLAPACK routine (version 1.7) --
*     University of Tennessee, Knoxville, Oak Ridge National Laboratory,
*     and University of California, Berkeley.
*     August 7, 2001
*
*     .. Scalar Arguments ..
      CHARACTER          TRANS
      INTEGER            BWL, BWU, IB, INFO, JA, LAF, LWORK, N, NRHS
*     ..
*     .. Array Arguments ..
      INTEGER            DESCA( * ), DESCB( * )
      COMPLEX*16         A( * ), AF( * ), B( * ), WORK( * )
*     ..
*
*
*  Purpose
*  =======
*
*  PZDBTRS solves a system of linear equations
*
*            A(1:N, JA:JA+N-1) * X = B(IB:IB+N-1, 1:NRHS)
*                                 or
*            A(1:N, JA:JA+N-1)' * X = B(IB:IB+N-1, 1:NRHS)
*
*  where A(1:N, JA:JA+N-1) is the matrix used to produce the factors
*  stored in A(1:N, JA:JA+N-1) and AF by PZDBTRF.
*  A(1:N, JA:JA+N-1) is an N-by-N complex
*  banded diagonally dominant-like distributed
*  matrix with bandwidths BWL and BWU.
*
*  Routine PZDBTRF MUST be called first.
*
*  =====================================================================
*
*  Arguments
*  =========
*
*
*  TRANS   (global input) CHARACTER
*          = 'N':  Solve with A(1:N, JA:JA+N-1);
*          = 'C':  Solve with conjugate_transpose( A(1:N, JA:JA+N-1) );
*
*  N       (global input) INTEGER
*          The number of rows and columns to be operated on, i.e. the
*          order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
*
*  BWL     (global input) INTEGER
*          Number of subdiagonals. 0 <= BWL <= N-1
*
*  BWU     (global input) INTEGER
*          Number of superdiagonals. 0 <= BWU <= N-1
*
*  NRHS    (global input) INTEGER
*          The number of right-hand sides, i.e., the number of columns
*          of the distributed submatrix B(IB:IB+N-1, 1:NRHS).
*          NRHS >= 0.
*
*  A       (local input/local output) COMPLEX*16 pointer into
*          local memory to an array with first dimension
*          LLD_A >= (BWL+BWU+1) (stored in DESCA).
*          On entry, this array contains the local pieces of the
*          factors L and U of the band factorization of the N-by-N
*          unsymmetric banded diagonally dominant-like distributed
*          matrix A(1:N, JA:JA+N-1), as computed by PZDBTRF.
*          This local portion is stored in the packed banded format
*          used in LAPACK. Please see the Notes below and the
*          ScaLAPACK manual for more detail on the format of
*          distributed matrices.
*
*  JA      (global input) INTEGER
*          The index in the global array A that points to the start of
*          the matrix to be operated on (which may be either all of A
*          or a submatrix of A).
*
*  DESCA   (global and local input) INTEGER array of dimension DLEN.
*          if 1D type (DTYPE_A=501), DLEN >= 7;
*          if 2D type (DTYPE_A=1), DLEN >= 9.
*          The array descriptor for the distributed matrix A.
*          Contains information on the mapping of A to memory. Please
*          see NOTES below for a full description and options.
*
*  B       (local input/local output) COMPLEX*16 pointer into
*          local memory to an array of local leading dimension
*          LLD_B >= NB.
*          On entry, this array contains the
*          local pieces of the right-hand sides
*          B(IB:IB+N-1, 1:NRHS).
*          On exit, this contains the local pieces of the solution
*          distributed matrix X.
*
*  IB      (global input) INTEGER
*          The row index in the global array B that points to the first
*          row of the matrix to be operated on (which may be either
*          all of B or a submatrix of B).
*
*  DESCB   (global and local input) INTEGER array of dimension DLEN.
*          if 1D type (DTYPE_B=502), DLEN >= 7;
*          if 2D type (DTYPE_B=1), DLEN >= 9.
*          The array descriptor for the distributed matrix B.
*          Contains information on the mapping of B to memory. Please
*          see NOTES below for a full description and options.
*
*  AF      (local input) COMPLEX*16 array, dimension LAF.
*          Auxiliary Fillin Space.
*          Fillin is created during the factorization routine
*          PZDBTRF and is stored in AF. If a linear system
*          is to be solved using PZDBTRS after the factorization
*          routine, AF *must not be altered* after the factorization.
*
*  LAF     (local input) INTEGER
*          Size of user-input Auxiliary Fillin space AF. Must be >=
*          NB*(BWL+BWU) + 6*MAX(BWL,BWU)*MAX(BWL,BWU).
*          If LAF is not large enough, an error code will be returned
*          and the minimum acceptable size will be returned in AF( 1 ).
*
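*          A sizing sketch (not part of the original source): under
*          the bound above, a caller could compute a safe LAF as
*
*             LAF = NB*( BWL+BWU ) + 6*MAX( BWL, BWU )*MAX( BWL, BWU )
*
*          with NB taken from entry 4 of the 1D descriptor DESCA.
*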
*  WORK    (local workspace/local output)
*          COMPLEX*16 temporary workspace. This space may
*          be overwritten in between calls to routines. WORK must be
*          the size given in LWORK.
*          On exit, WORK( 1 ) contains the minimal LWORK.
*
*  LWORK   (local input or global input) INTEGER
*          Size of user-input workspace WORK.
*          If LWORK is too small, the minimal acceptable size will be
*          returned in WORK( 1 ) and an error code is returned. LWORK >=
*          MAX(BWL,BWU)*NRHS.
*
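*          A minimal sketch (an assumption based on the query path
*          implemented below): calling with LWORK = -1 returns the
*          minimal size in WORK( 1 ) without solving, so WORK can be
*          sized before the real call:
*
*             LWORK = -1
*             CALL PZDBTRS( TRANS, N, BWL, BWU, NRHS, A, JA, DESCA,
*            $              B, IB, DESCB, AF, LAF, WORK, LWORK, INFO )
*             LWORK = INT( WORK( 1 ) )
*
*          and WORK then needs at least LWORK (here MAX(BWL,BWU)*NRHS)
*          complex elements.
*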
*  INFO    (global output) INTEGER
*          = 0:  successful exit
*          < 0:  If the i-th argument is an array and the j-th entry
*                had an illegal value, then INFO = -(i*100+j); if the
*                i-th argument is a scalar and had an illegal value,
*                then INFO = -i. For example, INFO = -804 flags an
*                illegal value in entry 4 of the 8th argument, DESCA.
*
*  =====================================================================
*
*
*  Restrictions
*  ============
*
*  The following are restrictions on the input parameters. Some of these
*  are temporary and will be removed in future releases, while others
*  may reflect fundamental technical limitations.
*
*  Non-cyclic restriction: VERY IMPORTANT!
*    P*NB >= mod(JA-1,NB)+N.
*    The mapping for matrices must be blocked, reflecting the nature
*    of the divide and conquer algorithm as a task-parallel algorithm.
*    This formula in words is: no processor may have more than one
*    chunk of the matrix.
*
*  Blocksize cannot be too small:
*    If the matrix spans more than one processor, the following
*    restriction on NB, the size of each block on each processor,
*    must hold:
*      NB >= 2*MAX(BWL,BWU)
*    The bulk of parallel computation is done on the matrix of size
*    O(NB) on each processor. If this is too small, divide and conquer
*    is a poor choice of algorithm.
*
*  Submatrix reference:
*    JA = IB
*    Alignment restriction that prevents unnecessary communication.
*
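*  A pre-call check sketch (not part of the original source) covering
*  the three restrictions above; P and OK are assumed caller-side
*  names for the process count and a LOGICAL flag:
*
*     OK = ( P*NB .GE. MOD( JA-1, NB )+N )                   .AND.
*    $     ( JA+N-1 .LE. NB .OR. NB .GE. 2*MAX( BWL, BWU ) ) .AND.
*    $     ( JA .EQ. IB )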
*
*
*  =====================================================================
*
*
*  Notes
*  =====
*
*  If the factorization routine and the solve routine are to be called
*  separately (to solve various sets of right-hand sides using the same
*  coefficient matrix), the auxiliary space AF *must not be altered*
*  between calls to the factorization routine and the solve routine.
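*
*  A minimal calling sketch under that note (illustrative, not from
*  the original source): factor once with PZDBTRF, then reuse A and
*  AF for several solves. NSETS and the B-loading step are assumed.
*
*     CALL PZDBTRF( N, BWL, BWU, A, JA, DESCA, AF, LAF, WORK, LWORK,
*    $              INFO )
*     DO 10 K = 1, NSETS
*        ... load the K-th set of right-hand sides into B ...
*        CALL PZDBTRS( 'N', N, BWL, BWU, NRHS, A, JA, DESCA, B, IB,
*    $                 DESCB, AF, LAF, WORK, LWORK, INFO )
*   10 CONTINUE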
*
*  The best algorithm for solving banded and tridiagonal linear systems
*  depends on a variety of parameters, especially the bandwidth.
*  Currently, only algorithms designed for the case N/P >> bw are
*  implemented. These go by many names, including Divide and Conquer,
*  Partitioning, domain decomposition-type, etc.
*
*  Algorithm description: Divide and Conquer
*
*    The Divide and Conquer algorithm assumes the matrix is narrowly
*    banded compared with the number of equations. In this situation,
*    it is best to distribute the input matrix A one-dimensionally,
*    with columns atomic and rows divided amongst the processes.
*    The basic algorithm divides the banded matrix into
*    P pieces with one stored on each processor,
*    and then proceeds in 2 phases for the factorization or 3 for the
*    solution of a linear system.
*    1) Local Phase:
*       The individual pieces are factored independently and in
*       parallel. These factors are applied to the matrix creating
*       fillin, which is stored in a non-inspectable way in auxiliary
*       space AF. Mathematically, this is equivalent to reordering
*       the matrix A as P A P^T and then factoring the principal
*       leading submatrix of size equal to the sum of the sizes of
*       the matrices factored on each processor. The factors of
*       these submatrices overwrite the corresponding parts of A
*       in memory.
*    2) Reduced System Phase:
*       A small (max(bwl,bwu)*(P-1)) system is formed representing
*       interaction of the larger blocks, and is stored (as are its
*       factors) in the space AF. A parallel Block Cyclic Reduction
*       algorithm is used. For a linear system, a parallel front solve
*       followed by an analogous backsolve, both using the structure
*       of the factored matrix, are performed.
*    3) Backsubstitution Phase:
*       For a linear system, a local backsubstitution is performed on
*       each processor in parallel.
*
*
*  Descriptors
*  ===========
*
*  Descriptors now have *types* and differ from ScaLAPACK 1.0.
*
*  Note: banded codes can use either the old two-dimensional
*  or new one-dimensional descriptors, though the processor grid in
*  both cases *must be one-dimensional*. We describe both types below.
*
*  Each global data object is described by an associated description
*  vector. This vector stores the information required to establish
*  the mapping between an object element and its corresponding process
*  and memory location.
*
*  Let A be a generic term for any 2D block cyclically distributed
*  array. Such a global array has an associated description vector
*  DESCA. In the following comments, the character _ should be read as
*  "of the global array".
*
*  NOTATION        STORED IN      EXPLANATION
*  --------------- -------------- --------------------------------------
*  DTYPE_A(global) DESCA( DTYPE_ )The descriptor type. In this case,
*                                 DTYPE_A = 1.
*  CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
*                                 the BLACS process grid A is distribu-
*                                 ted over. The context itself is glo-
*                                 bal, but the handle (the integer
*                                 value) may vary.
*  M_A    (global) DESCA( M_ )    The number of rows in the global
*                                 array A.
*  N_A    (global) DESCA( N_ )    The number of columns in the global
*                                 array A.
*  MB_A   (global) DESCA( MB_ )   The blocking factor used to distribute
*                                 the rows of the array.
*  NB_A   (global) DESCA( NB_ )   The blocking factor used to distribute
*                                 the columns of the array.
*  RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
*                                 row of the array A is distributed.
*  CSRC_A (global) DESCA( CSRC_ ) The process column over which the
*                                 first column of the array A is
*                                 distributed.
*  LLD_A  (local)  DESCA( LLD_ )  The leading dimension of the local
*                                 array.  LLD_A >= MAX(1,LOCr(M_A)).
*
*  Let K be the number of rows or columns of a distributed matrix,
*  and assume that its process grid has dimension p x q.
*  LOCr( K ) denotes the number of elements of K that a process
*  would receive if K were distributed over the p processes of its
*  process column.
*  Similarly, LOCc( K ) denotes the number of elements of K that a
*  process would receive if K were distributed over the q processes of
*  its process row.
*  The values of LOCr() and LOCc() may be determined via a call to the
*  ScaLAPACK tool function, NUMROC:
*          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
*          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
*  An upper bound for these quantities may be computed by:
*          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
*          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
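*
*  A short NUMROC usage sketch mirroring the formulas above; the grid
*  values come from BLACS_GRIDINFO, and LOCR/LOCC are assumed local
*  INTEGER variables:
*
*     CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*     LOCR = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW )
*     LOCC = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL )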
*
*
*  One-dimensional descriptors:
*
*  One-dimensional descriptors are a new addition to ScaLAPACK since
*  version 1.0. They simplify and shorten the descriptor for 1D
*  arrays.
*
*  Since ScaLAPACK supports two-dimensional arrays as the fundamental
*  object, we allow 1D arrays to be distributed either over the
*  first dimension of the array (as if the grid were P-by-1) or the
*  2nd dimension (as if the grid were 1-by-P). This choice is
*  indicated by the descriptor type (501 or 502)
*  as described below.
*
*  IMPORTANT NOTE: the actual BLACS grid represented by the
*  CTXT entry in the descriptor may be *either* P-by-1 or 1-by-P
*  irrespective of which one-dimensional descriptor type
*  (501 or 502) is input.
*  This routine will interpret the grid properly either way.
*  ScaLAPACK routines *do not support intercontext operations*, so
*  the grid passed to a single ScaLAPACK routine *must be the same*
*  for all array descriptors passed to that routine.
*
*  NOTE: In all cases where 1D descriptors are used, 2D descriptors
*  may also be used, since a one-dimensional array is a special case
*  of a two-dimensional array with one dimension of size unity.
*  The two-dimensional array used in this case *must* be of the
*  proper orientation:
*    If the appropriate one-dimensional descriptor is DTYPE_A=501
*    (1-by-P type), then the two-dimensional descriptor must
*    have a CTXT value that refers to a 1-by-P BLACS grid;
*    If the appropriate one-dimensional descriptor is DTYPE_A=502
*    (P-by-1 type), then the two-dimensional descriptor must
*    have a CTXT value that refers to a P-by-1 BLACS grid.
*
*
*  Summary of allowed descriptors, types, and BLACS grids:
*    DTYPE        501         502         1          1
*    BLACS grid   1xP or Px1  1xP or Px1  1xP        Px1
*    -----------------------------------------------------
*    A            OK          NO          OK         NO
*    B            NO          OK          NO         OK
*
*  Note that a consequence of this chart is that it is not possible
*  for *both* DTYPE_A and DTYPE_B to be 2D_type(1), as these lead
*  to opposite requirements for the orientation of the BLACS grid,
*  and as noted before, the *same* BLACS context must be used in
*  all descriptors in a single ScaLAPACK subroutine call.
*
*  Let A be a generic term for any 1D block cyclically distributed
*  array. Such a global array has an associated description vector
*  DESCA. In the following comments, the character _ should be read as
*  "of the global array".
*
*  NOTATION        STORED IN  EXPLANATION
*  --------------- ---------- ------------------------------------------
*  DTYPE_A(global) DESCA( 1 ) The descriptor type. For 1D grids,
*                             DTYPE_A = 501: 1-by-P grid.
*                             DTYPE_A = 502: P-by-1 grid.
*  CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
*                             the BLACS process grid A is distribu-
*                             ted over. The context itself is glo-
*                             bal, but the handle (the integer
*                             value) may vary.
*  N_A    (global) DESCA( 3 ) The size of the array dimension being
*                             distributed.
*  NB_A   (global) DESCA( 4 ) The blocking factor used to distribute
*                             the distributed dimension of the array.
*  SRC_A  (global) DESCA( 5 ) The process row or column over which the
*                             first row or column of the array
*                             is distributed.
*  LLD_A  (local)  DESCA( 6 ) The leading dimension of the local array
*                             storing the local blocks of the distri-
*                             buted array A. Minimum value of LLD_A
*                             depends on DTYPE_A.
*                             DTYPE_A = 501: LLD_A >=
*                                size of undistributed dimension, >= 1.
*                             DTYPE_A = 502: LLD_A >= NB_A, >= 1.
*  Reserved        DESCA( 7 ) Reserved for future use.
*
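*  A descriptor-initialization sketch (not from the original source):
*  the 7-element 1D descriptors this routine accepts could be filled
*  as follows, assuming BLACS context ICTXT and source process 0:
*
*     DESCA( 1 ) = 501
*     DESCA( 2 ) = ICTXT
*     DESCA( 3 ) = N
*     DESCA( 4 ) = NB
*     DESCA( 5 ) = 0
*     DESCA( 6 ) = BWL+BWU+1
*     DESCA( 7 ) = 0
*
*     DESCB( 1 ) = 502
*     DESCB( 2 ) = ICTXT
*     DESCB( 3 ) = N
*     DESCB( 4 ) = NB
*     DESCB( 5 ) = 0
*     DESCB( 6 ) = NB
*     DESCB( 7 ) = 0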
*
*
*  =====================================================================
*
*  Code Developer: Andrew J. Cleary, University of Tennessee.
*    Current address: Lawrence Livermore National Labs.
*  This version released: August, 2001.
*
*  =====================================================================
*
*     ..
*     .. Parameters ..
      DOUBLE PRECISION   ONE, ZERO
      parameter( one = 1.0d+0 )
      parameter( zero = 0.0d+0 )
      COMPLEX*16         CONE, CZERO
      parameter( cone = ( 1.0d+0, 0.0d+0 ) )
      parameter( czero = ( 0.0d+0, 0.0d+0 ) )
      INTEGER            INT_ONE
      parameter( int_one = 1 )
      INTEGER            DESCMULT, BIGNUM
      parameter( descmult = 100, bignum = descmult*descmult )
      INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
     $                   lld_, mb_, m_, nb_, n_, rsrc_
      parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
     $           ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
     $           rsrc_ = 7, csrc_ = 8, lld_ = 9 )
*     ..
*     .. Local Scalars ..
      INTEGER            CSRC, FIRST_PROC, ICTXT, ICTXT_NEW, ICTXT_SAVE,
     $                   idum2, idum3, ja_new, llda, lldb, mycol, myrow,
     $                   nb, np, npcol, nprow, np_save, part_offset,
     $                   return_code, store_m_b, store_n_a,
     $                   work_size_min
*     ..
*     .. Local Arrays ..
      INTEGER            DESCA_1XP( 7 ), DESCB_PX1( 7 ),
     $                   param_check( 17, 3 )
*     ..
*     .. External Subroutines ..
      EXTERNAL           blacs_gridinfo, desc_convert, globchk, pxerbla,
     $                   pzdbtrsv, reshape
*     ..
*     .. External Functions ..
      LOGICAL            LSAME
      INTEGER            NUMROC
      EXTERNAL           lsame, numroc
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          ichar, min, mod
*     ..
*     .. Executable Statements ..
*
*     Test the input parameters
*
      info = 0
*
*     Convert descriptor into standard form for easy access to
*     parameters, check that grid is of right shape.
*
      desca_1xp( 1 ) = 501
      descb_px1( 1 ) = 502
*
      CALL desc_convert( desca, desca_1xp, return_code )
*
      IF( return_code .NE. 0 ) THEN
         info = -( 8*100 + 2 )
      ENDIF
*
      CALL desc_convert( descb, descb_px1, return_code )
*
      IF( return_code .NE. 0 ) THEN
         info = -( 11*100 + 2 )
      ENDIF
*
*     Consistency checks for DESCA and DESCB.
*
*     Context must be the same
      IF( desca_1xp( 2 ) .NE. descb_px1( 2 ) ) THEN
         info = -( 11*100 + 2 )
      ENDIF
*
*     These are alignment restrictions that may or may not be removed
*     in future releases. -Andy Cleary, April 14, 1996.
*
*     Block sizes must be the same
      IF( desca_1xp( 4 ) .NE. descb_px1( 4 ) ) THEN
         info = -( 11*100 + 4 )
      ENDIF
*
*     Source processor must be the same
*
      IF( desca_1xp( 5 ) .NE. descb_px1( 5 ) ) THEN
         info = -( 11*100 + 5 )
      ENDIF
*
*     Get values out of descriptor for use in code.
*
      ictxt = desca_1xp( 2 )
      csrc = desca_1xp( 5 )
      nb = desca_1xp( 4 )
      llda = desca_1xp( 6 )
      store_n_a = desca_1xp( 3 )
      lldb = descb_px1( 6 )
      store_m_b = descb_px1( 3 )
*
*     Get grid parameters
*
*
      CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
      np = nprow * npcol
*
*
*
      IF( lsame( trans, 'N' ) ) THEN
         idum2 = ichar( 'N' )
      ELSE IF( lsame( trans, 'C' ) ) THEN
         idum2 = ichar( 'C' )
      ELSE
         info = -1
      END IF
*
      IF( lwork .LT. -1 ) THEN
         info = -15
      ELSE IF( lwork .EQ. -1 ) THEN
         idum3 = -1
      ELSE
         idum3 = 1
      ENDIF
*
      IF( n .LT. 0 ) THEN
         info = -2
      ENDIF
*
      IF( n+ja-1 .GT. store_n_a ) THEN
         info = -( 8*100 + 6 )
      ENDIF
*
      IF( ( bwl .GT. n-1 ) .OR.
     $    ( bwl .LT. 0 ) ) THEN
         info = -3
      ENDIF
*
      IF( ( bwu .GT. n-1 ) .OR.
     $    ( bwu .LT. 0 ) ) THEN
         info = -4
      ENDIF
*
      IF( llda .LT. ( bwl+bwu+1 ) ) THEN
         info = -( 8*100 + 6 )
      ENDIF
*
      IF( nb .LE. 0 ) THEN
         info = -( 8*100 + 4 )
      ENDIF
*
      IF( n+ib-1 .GT. store_m_b ) THEN
         info = -( 11*100 + 3 )
      ENDIF
*
      IF( lldb .LT. nb ) THEN
         info = -( 11*100 + 6 )
      ENDIF
*
      IF( nrhs .LT. 0 ) THEN
         info = -5
      ENDIF
*
*     Current alignment restriction
*
      IF( ja .NE. ib ) THEN
         info = -7
      ENDIF
*
*     Argument checking that is specific to Divide & Conquer routine
*
      IF( nprow .NE. 1 ) THEN
         info = -( 8*100+2 )
      ENDIF
*
      IF( n .GT. np*nb-mod( ja-1, nb ) ) THEN
         info = -( 2 )
         CALL pxerbla( ictxt,
     $                 'PZDBTRS, D&C alg.: only 1 block per proc',
     $                 -info )
         RETURN
      ENDIF
*
      IF( ( ja+n-1 .GT. nb ) .AND. ( nb .LT. 2*max( bwl, bwu ) ) ) THEN
         info = -( 8*100+4 )
         CALL pxerbla( ictxt,
     $                 'PZDBTRS, D&C alg.: NB too small',
     $                 -info )
         RETURN
      ENDIF
*
*
      work_size_min =
     $   max( bwl, bwu )*nrhs
*
      work( 1 ) = work_size_min
*
      IF( lwork .LT. work_size_min ) THEN
         IF( lwork .NE. -1 ) THEN
            info = -15
            CALL pxerbla( ictxt,
     $                    'PZDBTRS: worksize error',
     $                    -info )
         ENDIF
         RETURN
      ENDIF
*
*     Pack params and positions into arrays for global consistency check
*
      param_check( 17, 1 ) = descb( 5 )
      param_check( 16, 1 ) = descb( 4 )
      param_check( 15, 1 ) = descb( 3 )
      param_check( 14, 1 ) = descb( 2 )
      param_check( 13, 1 ) = descb( 1 )
      param_check( 12, 1 ) = ib
      param_check( 11, 1 ) = desca( 5 )
      param_check( 10, 1 ) = desca( 4 )
      param_check( 9, 1 ) = desca( 3 )
      param_check( 8, 1 ) = desca( 1 )
      param_check( 7, 1 ) = ja
      param_check( 6, 1 ) = nrhs
      param_check( 5, 1 ) = bwu
      param_check( 4, 1 ) = bwl
      param_check( 3, 1 ) = n
      param_check( 2, 1 ) = idum3
      param_check( 1, 1 ) = idum2
*
      param_check( 17, 2 ) = 1105
      param_check( 16, 2 ) = 1104
      param_check( 15, 2 ) = 1103
      param_check( 14, 2 ) = 1102
      param_check( 13, 2 ) = 1101
      param_check( 12, 2 ) = 10
      param_check( 11, 2 ) = 805
      param_check( 10, 2 ) = 804
      param_check( 9, 2 ) = 803
      param_check( 8, 2 ) = 801
      param_check( 7, 2 ) = 7
      param_check( 6, 2 ) = 5
      param_check( 5, 2 ) = 4
      param_check( 4, 2 ) = 3
      param_check( 3, 2 ) = 2
      param_check( 2, 2 ) = 15
      param_check( 1, 2 ) = 1
*
*     Want to find errors with MIN( ), so if no error, set it to a big
*     number. If there already is an error, multiply by the
*     descriptor multiplier.
*
      IF( info.GE.0 ) THEN
         info = bignum
      ELSE IF( info.LT.-descmult ) THEN
         info = -info
      ELSE
         info = -info * descmult
      END IF
*
*     Check consistency across processors
*
      CALL globchk( ictxt, 17, param_check, 17,
     $              param_check( 1, 3 ), info )
*
*     Prepare output: set info = 0 if no error, and divide by DESCMULT
*     if error is not in a descriptor entry.
*
      IF( info.EQ.bignum ) THEN
         info = 0
      ELSE IF( mod( info, descmult ) .EQ. 0 ) THEN
         info = -info / descmult
      ELSE
         info = -info
      END IF
*
      IF( info.LT.0 ) THEN
         CALL pxerbla( ictxt, 'PZDBTRS', -info )
         RETURN
      END IF
*
*     Quick return if possible
*
      IF( n.EQ.0 )
     $   RETURN
*
      IF( nrhs.EQ.0 )
     $   RETURN
*
*
*     Adjust addressing into matrix space to properly get into
*     the beginning part of the relevant data
*
      part_offset = nb*( ( ja-1 )/( npcol*nb ) )
*
      IF( ( mycol-csrc ) .LT. ( ja-part_offset-1 )/nb ) THEN
         part_offset = part_offset + nb
      ENDIF
*
      IF( mycol .LT. csrc ) THEN
         part_offset = part_offset - nb
      ENDIF
*
*     Form a new BLACS grid (the "standard form" grid) with only procs
*     holding part of the matrix, of size 1xNP where NP is adjusted,
*     starting at csrc=0, with JA modified to reflect dropped procs.
*
*     First processor to hold part of the matrix:
*
      first_proc = mod( ( ja-1 )/nb+csrc, npcol )
*
*     Calculate new JA while dropping off unused processors.
*
      ja_new = mod( ja-1, nb ) + 1
*
*     Save and compute new value of NP
*
      np_save = np
      np = ( ja_new+n-2 )/nb + 1
*
*     Call utility routine that forms "standard-form" grid
*
      CALL reshape( ictxt, int_one, ictxt_new, int_one,
     $              first_proc, int_one, np )
*
*     Use new context from standard grid as context.
*
      ictxt_save = ictxt
      ictxt = ictxt_new
      desca_1xp( 2 ) = ictxt_new
      descb_px1( 2 ) = ictxt_new
*
*     Get information about new grid.
*
      CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
*
*     Drop out processors that do not have part of the matrix.
*
      IF( myrow .LT. 0 ) THEN
         GOTO 1234
      ENDIF
*
*
*
*     Begin main code
*
      info = 0
*
*     Call frontsolve routine
*
      IF( lsame( trans, 'N' ) ) THEN
*
         CALL pzdbtrsv( 'L', 'N', n, bwl, bwu, nrhs,
     $                  a( part_offset+1 ), ja_new, desca_1xp, b, ib,
     $                  descb_px1, af, laf, work, lwork, info )
*
      ELSE
*
         CALL pzdbtrsv( 'U', 'C', n, bwl, bwu, nrhs,
     $                  a( part_offset+1 ), ja_new, desca_1xp, b, ib,
     $                  descb_px1, af, laf, work, lwork, info )
*
      ENDIF
*
*     Call backsolve routine
*
      IF( lsame( trans, 'C' ) ) THEN
*
         CALL pzdbtrsv( 'L', 'C', n, bwl, bwu, nrhs,
     $                  a( part_offset+1 ), ja_new, desca_1xp, b, ib,
     $                  descb_px1, af, laf, work, lwork, info )
*
      ELSE
*
         CALL pzdbtrsv( 'U', 'N', n, bwl, bwu, nrhs,
     $                  a( part_offset+1 ), ja_new, desca_1xp, b, ib,
     $                  descb_px1, af, laf, work, lwork, info )
*
      ENDIF
 1000 CONTINUE
*
*
*     Free BLACS space used to hold standard-form grid.
*
      IF( ictxt_save .NE. ictxt_new ) THEN
         CALL blacs_gridexit( ictxt_new )
      ENDIF
*
 1234 CONTINUE
*
*     Restore saved input parameters
*
      ictxt = ictxt_save
      np = np_save
*
*     Output minimum worksize
*
      work( 1 ) = work_size_min
*
*
      RETURN
*
*     End of PZDBTRS
*
      END