pspbtrsv.f
1 SUBROUTINE pspbtrsv( UPLO, TRANS, N, BW, NRHS, A, JA, DESCA, B,
2 $ IB, DESCB, AF, LAF, WORK, LWORK, INFO )
3*
4* -- ScaLAPACK routine (version 2.0.2) --
5* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver
6* May 1 2012
7*
8* .. Scalar Arguments ..
9 CHARACTER TRANS, UPLO
10 INTEGER BW, IB, INFO, JA, LAF, LWORK, N, NRHS
11* ..
12* .. Array Arguments ..
13 INTEGER DESCA( * ), DESCB( * )
14 REAL A( * ), AF( * ), B( * ), WORK( * )
15* ..
16*
17*
18* Purpose
19* =======
20*
21* PSPBTRSV solves a banded triangular system of linear equations
22*
23* A(1:N, JA:JA+N-1) * X = B(IB:IB+N-1, 1:NRHS)
24* or
25* A(1:N, JA:JA+N-1)^T * X = B(IB:IB+N-1, 1:NRHS)
26*
27* where A(1:N, JA:JA+N-1) is a banded
28* triangular matrix factor produced by the
29* Cholesky factorization code PSPBTRF
30* and is stored in A(1:N,JA:JA+N-1) and AF.
31* The matrix stored in A(1:N, JA:JA+N-1) is either
32* upper or lower triangular according to UPLO,
33* and the choice of solving A(1:N, JA:JA+N-1) or A(1:N, JA:JA+N-1)^T
34* is dictated by the user through the parameter TRANS.
35*
36* Routine PSPBTRF MUST be called first.
37*
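*  As an illustration only (not part of the original header): to solve
*  A(1:N, JA:JA+N-1) * X = B(IB:IB+N-1, 1:NRHS) after a Cholesky
*  factorization with UPLO = 'L', one would typically call this
*  routine twice, e.g.
*
*     CALL PSPBTRSV( 'L', 'N', N, BW, NRHS, A, JA, DESCA, B, IB,
*    $               DESCB, AF, LAF, WORK, LWORK, INFO )
*     CALL PSPBTRSV( 'L', 'T', N, BW, NRHS, A, JA, DESCA, B, IB,
*    $               DESCB, AF, LAF, WORK, LWORK, INFO )
*
*  i.e. a forward solve with L followed by a backward solve with L^T.
*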
38* =====================================================================
39*
40* Arguments
41* =========
42*
43* UPLO (global input) CHARACTER
44* = 'U': Upper triangle of A(1:N, JA:JA+N-1) is stored;
45* = 'L': Lower triangle of A(1:N, JA:JA+N-1) is stored.
46*
47* TRANS (global input) CHARACTER
48* = 'N': Solve with A(1:N, JA:JA+N-1);
49* = 'T' or 'C': Solve with A(1:N, JA:JA+N-1)^T;
50*
51* N (global input) INTEGER
52* The number of rows and columns to be operated on, i.e. the
53* order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
54*
55* BW (global input) INTEGER
56* Number of subdiagonals in L or U. 0 <= BW <= N-1
57*
58* NRHS (global input) INTEGER
59* The number of right hand sides, i.e., the number of columns
60* of the distributed submatrix B(IB:IB+N-1, 1:NRHS).
61* NRHS >= 0.
62*
63* A (local input/local output) REAL pointer into
64* local memory to an array with first dimension
65* LLD_A >=(bw+1) (stored in DESCA).
66* On entry, this array contains the local pieces of the
67* N-by-N symmetric banded distributed Cholesky factor L or
68* L^T, stored in A(1:N, JA:JA+N-1).
69* This local portion is stored in the packed banded format
70* used in LAPACK. Please see the Notes below and the
71* ScaLAPACK manual for more detail on the format of
72* distributed matrices.
73*
74* JA (global input) INTEGER
75* The index in the global array A that points to the start of
76* the matrix to be operated on (which may be either all of A
77* or a submatrix of A).
78*
79* DESCA (global and local input) INTEGER array of dimension DLEN.
80* if 1D type (DTYPE_A=501), DLEN >= 7;
81* if 2D type (DTYPE_A=1), DLEN >= 9.
82* The array descriptor for the distributed matrix A.
83* Contains information on the mapping of A to memory. Please
84* see NOTES below for full description and options.
85*
86* B (local input/local output) REAL pointer into
87* local memory to an array of local lead dimension lld_b>=NB.
88* On entry, this array contains the
89* local pieces of the right hand sides
90* B(IB:IB+N-1, 1:NRHS).
91* On exit, this array contains the local piece of the solution
92* distributed matrix X.
93*
94* IB (global input) INTEGER
95* The row index in the global array B that points to the first
96* row of the matrix to be operated on (which may be either
97* all of B or a submatrix of B).
98*
99* DESCB (global and local input) INTEGER array of dimension DLEN.
100* if 1D type (DTYPE_B=502), DLEN >= 7;
101* if 2D type (DTYPE_B=1), DLEN >= 9.
102* The array descriptor for the distributed matrix B.
103* Contains information on the mapping of B to memory. Please
104* see NOTES below for full description and options.
105*
106* AF (local output) REAL array, dimension LAF.
107* Auxiliary Fillin Space.
108* Fillin is created during the factorization routine
109* PSPBTRF and this is stored in AF. If a linear system
110* is to be solved using PSPBTRS after the factorization
111* routine, AF *must not be altered* after the factorization.
112*
113* LAF (local input) INTEGER
114* Size of user-input Auxiliary Fillin space AF. Must be >=
115* (NB+2*bw)*bw
116* If LAF is not large enough, an error code will be returned
117* and the minimum acceptable size will be returned in AF( 1 )
118*
119* WORK (local workspace/local output)
120* REAL temporary workspace. This space may
121* be overwritten in between calls to routines. WORK must be
122* the size given in LWORK.
123* On exit, WORK( 1 ) contains the minimal LWORK.
124*
125* LWORK (local input or global input) INTEGER
126* Size of user-input workspace WORK.
127* If LWORK is too small, the minimal acceptable size will be
128* returned in WORK(1) and an error code is returned. LWORK>=
129* (bw*NRHS)
130*
131* INFO (global output) INTEGER
132* = 0: successful exit
133* < 0: If the i-th argument is an array and the j-th entry had
134* an illegal value, then INFO = -(i*100+j), if the i-th
135* argument is a scalar and had an illegal value, then
136* INFO = -i.
137*
138* =====================================================================
139*
140*
141* Restrictions
142* ============
143*
144* The following are restrictions on the input parameters. Some of these
145* are temporary and will be removed in future releases, while others
146* may reflect fundamental technical limitations.
147*
148* Non-cyclic restriction: VERY IMPORTANT!
149* P*NB>= mod(JA-1,NB)+N.
150* The mapping for matrices must be blocked, reflecting the nature
151* of the divide and conquer algorithm as a task-parallel algorithm.
152* This formula in words is: no processor may have more than one
153* chunk of the matrix.
154*
155* Blocksize cannot be too small:
156* If the matrix spans more than one processor, the following
157* restriction on NB, the size of each block on each processor,
158* must hold:
159* NB >= 2*BW
160* The bulk of parallel computation is done on the matrix of size
161* O(NB) on each processor. If this is too small, divide and conquer
162* is a poor choice of algorithm.
163*
164* Submatrix reference:
165* JA = IB
166* Alignment restriction that prevents unnecessary communication.
167*
168*
169* =====================================================================
170*
171*
172* Notes
173* =====
174*
175* If the factorization routine and the solve routine are to be called
176* separately (to solve various sets of right hand sides using the same
177* coefficient matrix), the auxiliary space AF *must not be altered*
178* between calls to the factorization routine and the solve routine.
179*
180* The best algorithm for solving banded and tridiagonal linear systems
181* depends on a variety of parameters, especially the bandwidth.
182* Currently, only algorithms designed for the case N/P >> bw are
183* implemented. These go by many names, including Divide and Conquer,
184* Partitioning, domain decomposition-type, etc.
185*
186* Algorithm description: Divide and Conquer
187*
188* The Divide and Conquer algorithm assumes the matrix is narrowly
189* banded compared with the number of equations. In this situation,
190* it is best to distribute the input matrix A one-dimensionally,
191* with columns atomic and rows divided amongst the processes.
192* The basic algorithm divides the banded matrix up into
193* P pieces with one stored on each processor,
194* and then proceeds in 2 phases for the factorization or 3 for the
195* solution of a linear system.
196* 1) Local Phase:
197* The individual pieces are factored independently and in
198* parallel. These factors are applied to the matrix creating
199* fillin, which is stored in a non-inspectable way in auxiliary
200* space AF. Mathematically, this is equivalent to reordering
201* the matrix A as P A P^T and then factoring the principal
202* leading submatrix of size equal to the sum of the sizes of
203* the matrices factored on each processor. The factors of
204* these submatrices overwrite the corresponding parts of A
205* in memory.
206* 2) Reduced System Phase:
207* A small (BW* (P-1)) system is formed representing
208* interaction of the larger blocks, and is stored (as are its
209* factors) in the space AF. A parallel Block Cyclic Reduction
210* algorithm is used. For a linear system, a parallel front solve
211* followed by an analogous backsolve, both using the structure
212* of the factored matrix, are performed.
213* 3) Backsubstitution Phase:
214* For a linear system, a local backsubstitution is performed on
215* each processor in parallel.
216*
217*
218* Descriptors
219* ===========
220*
221* Descriptors now have *types* and differ from ScaLAPACK 1.0.
222*
223* Note: banded codes can use either the old two dimensional
224* or new one-dimensional descriptors, though the processor grid in
225* both cases *must be one-dimensional*. We describe both types below.
226*
227* Each global data object is described by an associated description
228* vector. This vector stores the information required to establish
229* the mapping between an object element and its corresponding process
230* and memory location.
231*
232* Let A be a generic term for any 2D block cyclically distributed array.
233* Such a global array has an associated description vector DESCA.
234* In the following comments, the character _ should be read as
235* "of the global array".
236*
237* NOTATION STORED IN EXPLANATION
238* --------------- -------------- --------------------------------------
239* DTYPE_A(global) DESCA( DTYPE_ )The descriptor type. In this case,
240* DTYPE_A = 1.
241* CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
242* the BLACS process grid A is distribu-
243* ted over. The context itself is glo-
244* bal, but the handle (the integer
245* value) may vary.
246* M_A (global) DESCA( M_ ) The number of rows in the global
247* array A.
248* N_A (global) DESCA( N_ ) The number of columns in the global
249* array A.
250* MB_A (global) DESCA( MB_ ) The blocking factor used to distribute
251* the rows of the array.
252* NB_A (global) DESCA( NB_ ) The blocking factor used to distribute
253* the columns of the array.
254* RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
255* row of the array A is distributed.
256* CSRC_A (global) DESCA( CSRC_ ) The process column over which the
257* first column of the array A is
258* distributed.
259* LLD_A (local) DESCA( LLD_ ) The leading dimension of the local
260* array. LLD_A >= MAX(1,LOCr(M_A)).
261*
262* Let K be the number of rows or columns of a distributed matrix,
263* and assume that its process grid has dimension p x q.
264* LOCr( K ) denotes the number of elements of K that a process
265* would receive if K were distributed over the p processes of its
266* process column.
267* Similarly, LOCc( K ) denotes the number of elements of K that a
268* process would receive if K were distributed over the q processes of
269* its process row.
270* The values of LOCr() and LOCc() may be determined via a call to the
271* ScaLAPACK tool function, NUMROC:
272* LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
273* LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
274* An upper bound for these quantities may be computed by:
275* LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
276* LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
277*
278*
279* One-dimensional descriptors:
280*
281* One-dimensional descriptors are a new addition to ScaLAPACK since
282* version 1.0. They simplify and shorten the descriptor for 1D
283* arrays.
284*
285* Since ScaLAPACK supports two-dimensional arrays as the fundamental
286* object, we allow 1D arrays to be distributed either over the
287* first dimension of the array (as if the grid were P-by-1) or the
288* 2nd dimension (as if the grid were 1-by-P). This choice is
289* indicated by the descriptor type (501 or 502)
290* as described below.
291*
292* IMPORTANT NOTE: the actual BLACS grid represented by the
293* CTXT entry in the descriptor may be *either* P-by-1 or 1-by-P
294* irrespective of which one-dimensional descriptor type
295* (501 or 502) is input.
296* This routine will interpret the grid properly either way.
297* ScaLAPACK routines *do not support intercontext operations* so that
298* the grid passed to a single ScaLAPACK routine *must be the same*
299* for all array descriptors passed to that routine.
300*
301* NOTE: In all cases where 1D descriptors are used, 2D descriptors
302* may also be used, since a one-dimensional array is a special case
303* of a two-dimensional array with one dimension of size unity.
304* The two-dimensional array used in this case *must* be of the
305* proper orientation:
306* If the appropriate one-dimensional descriptor is DTYPE_A=501
307* (1 by P type), then the two dimensional descriptor must
308* have a CTXT value that refers to a 1 by P BLACS grid;
309* If the appropriate one-dimensional descriptor is DTYPE_A=502
310* (P by 1 type), then the two dimensional descriptor must
311* have a CTXT value that refers to a P by 1 BLACS grid.
312*
313*
314* Summary of allowed descriptors, types, and BLACS grids:
315* DTYPE 501 502 1 1
316* BLACS grid 1xP or Px1 1xP or Px1 1xP Px1
317* -----------------------------------------------------
318* A OK NO OK NO
319* B NO OK NO OK
320*
321* Note that a consequence of this chart is that it is not possible
322* for *both* DTYPE_A and DTYPE_B to be 2D_type(1), as these lead
323* to opposite requirements for the orientation of the BLACS grid,
324* and as noted before, the *same* BLACS context must be used in
325* all descriptors in a single ScaLAPACK subroutine call.
326*
327* Let A be a generic term for any 1D block cyclically distributed array.
328* Such a global array has an associated description vector DESCA.
329* In the following comments, the character _ should be read as
330* "of the global array".
331*
332* NOTATION STORED IN EXPLANATION
333* --------------- ---------- ------------------------------------------
334* DTYPE_A(global) DESCA( 1 ) The descriptor type. For 1D grids,
335* TYPE_A = 501: 1-by-P grid.
336* TYPE_A = 502: P-by-1 grid.
337* CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
338* the BLACS process grid A is distribu-
339* ted over. The context itself is glo-
340* bal, but the handle (the integer
341* value) may vary.
342* N_A (global) DESCA( 3 ) The size of the array dimension being
343* distributed.
344* NB_A (global) DESCA( 4 ) The blocking factor used to distribute
345* the distributed dimension of the array.
346* SRC_A (global) DESCA( 5 ) The process row or column over which the
347* first row or column of the array
348* is distributed.
349* LLD_A (local) DESCA( 6 ) The leading dimension of the local array
350* storing the local blocks of the distri-
351* buted array A. Minimum value of LLD_A
352* depends on TYPE_A.
353* TYPE_A = 501: LLD_A >=
354* MAX( size of undistributed dimension, 1 ).
355* TYPE_A = 502: LLD_A >= MAX( NB_A, 1 ).
356* Reserved DESCA( 7 ) Reserved for future use.
357*
358*
359*
360* =====================================================================
361*
362* Code Developer: Andrew J. Cleary, University of Tennessee.
363* Current address: Lawrence Livermore National Labs.
364*
365* =====================================================================
366*
367* .. Parameters ..
368 REAL ONE
369 parameter( one = 1.0e+0 )
370 REAL ZERO
371 parameter( zero = 0.0e+0 )
372 INTEGER INT_ONE
373 parameter( int_one = 1 )
374 INTEGER DESCMULT, BIGNUM
375 parameter( descmult = 100, bignum = descmult*descmult )
376 INTEGER BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
377 $ lld_, mb_, m_, nb_, n_, rsrc_
378 parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
379 $ ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
380 $ rsrc_ = 7, csrc_ = 8, lld_ = 9 )
381* ..
382* .. Local Scalars ..
383 INTEGER CSRC, FIRST_PROC, ICTXT, ICTXT_NEW, ICTXT_SAVE,
384 $ idum1, idum2, idum3, ja_new, level_dist, llda,
385 $ lldb, mbw2, mycol, myrow, my_num_cols, nb, np,
386 $ npcol, nprow, np_save, odd_size, ofst,
387 $ part_offset, part_size, return_code, store_m_b,
388 $ store_n_a, work_size_min
389* ..
390* .. Local Arrays ..
391 INTEGER DESCA_1XP( 7 ), DESCB_PX1( 7 ),
392 $ param_check( 17, 3 )
393* ..
394* .. External Subroutines ..
395 EXTERNAL blacs_gridexit, blacs_gridinfo, desc_convert,
396 $ globchk, pxerbla, reshape, sgemm, sgerv2d,
397 $ sgesd2d, slamov, smatadd, stbtrs, strmm, strtrs
398* ..
399* .. External Functions ..
400 LOGICAL LSAME
401 INTEGER NUMROC
402 EXTERNAL lsame, numroc
403* ..
404* .. Intrinsic Functions ..
405 INTRINSIC ichar, mod
406* ..
407* .. Executable Statements ..
408*
409* Test the input parameters
410*
411 info = 0
412*
413* Convert descriptor into standard form for easy access to
414* parameters, check that grid is of right shape.
415*
416 desca_1xp( 1 ) = 501
417 descb_px1( 1 ) = 502
418*
419 CALL desc_convert( desca, desca_1xp, return_code )
420*
421 IF( return_code.NE.0 ) THEN
422 info = -( 8*100+2 )
423 END IF
424*
425 CALL desc_convert( descb, descb_px1, return_code )
426*
427 IF( return_code.NE.0 ) THEN
428 info = -( 11*100+2 )
429 END IF
430*
431* Consistency checks for DESCA and DESCB.
432*
433* Context must be the same
434 IF( desca_1xp( 2 ).NE.descb_px1( 2 ) ) THEN
435 info = -( 11*100+2 )
436 END IF
437*
438* These are alignment restrictions that may or may not be removed
439* in future releases. -Andy Cleary, April 14, 1996.
440*
441* Block sizes must be the same
442 IF( desca_1xp( 4 ).NE.descb_px1( 4 ) ) THEN
443 info = -( 11*100+4 )
444 END IF
445*
446* Source processor must be the same
447*
448 IF( desca_1xp( 5 ).NE.descb_px1( 5 ) ) THEN
449 info = -( 11*100+5 )
450 END IF
451*
452* Get values out of descriptor for use in code.
453*
454 ictxt = desca_1xp( 2 )
455 csrc = desca_1xp( 5 )
456 nb = desca_1xp( 4 )
457 llda = desca_1xp( 6 )
458 store_n_a = desca_1xp( 3 )
459 lldb = descb_px1( 6 )
460 store_m_b = descb_px1( 3 )
461*
462* Get grid parameters
463*
464*
465* Pre-calculate bw^2
466*
467 mbw2 = bw*bw
468*
469 CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
470 np = nprow*npcol
471*
472*
473*
474 IF( lsame( uplo, 'U' ) ) THEN
475 idum1 = ichar( 'U' )
476 ELSE IF( lsame( uplo, 'L' ) ) THEN
477 idum1 = ichar( 'L' )
478 ELSE
479 info = -1
480 END IF
481*
482 IF( lsame( trans, 'N' ) ) THEN
483 idum2 = ichar( 'N' )
484 ELSE IF( lsame( trans, 'T' ) ) THEN
485 idum2 = ichar( 'T' )
486 ELSE IF( lsame( trans, 'C' ) ) THEN
487 idum2 = ichar( 'T' )
488 ELSE
489 info = -2
490 END IF
491*
492 IF( lwork.LT.-1 ) THEN
493 info = -14
494 ELSE IF( lwork.EQ.-1 ) THEN
495 idum3 = -1
496 ELSE
497 idum3 = 1
498 END IF
499*
500 IF( n.LT.0 ) THEN
501 info = -3
502 END IF
503*
504 IF( n+ja-1.GT.store_n_a ) THEN
505 info = -( 8*100+6 )
506 END IF
507*
508 IF( ( bw.GT.n-1 ) .OR. ( bw.LT.0 ) ) THEN
509 info = -4
510 END IF
511*
512 IF( llda.LT.( bw+1 ) ) THEN
513 info = -( 8*100+6 )
514 END IF
515*
516 IF( nb.LE.0 ) THEN
517 info = -( 8*100+4 )
518 END IF
519*
520 IF( n+ib-1.GT.store_m_b ) THEN
521 info = -( 11*100+3 )
522 END IF
523*
524 IF( lldb.LT.nb ) THEN
525 info = -( 11*100+6 )
526 END IF
527*
528 IF( nrhs.LT.0 ) THEN
529 info = -5
530 END IF
531*
532* Current alignment restriction
533*
534 IF( ja.NE.ib ) THEN
535 info = -7
536 END IF
537*
538* Argument checking that is specific to Divide & Conquer routine
539*
540 IF( nprow.NE.1 ) THEN
541 info = -( 8*100+2 )
542 END IF
543*
544 IF( n.GT.np*nb-mod( ja-1, nb ) ) THEN
545 info = -( 3 )
546 CALL pxerbla( ictxt,
547 $ 'PSPBTRSV, D&C alg.: only 1 block per proc',
548 $ -info )
549 RETURN
550 END IF
551*
552 IF( ( ja+n-1.GT.nb ) .AND. ( nb.LT.2*bw ) ) THEN
553 info = -( 8*100+4 )
554 CALL pxerbla( ictxt, 'PSPBTRSV, D&C alg.: NB too small',
555 $ -info )
556 RETURN
557 END IF
558*
559*
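*     Minimum workspace: a BW x NRHS buffer used to hold and exchange
*     right hand side contributions between processes (this matches
*     the LWORK requirement documented above).
*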
560 work_size_min = bw*nrhs
561*
562 work( 1 ) = work_size_min
563*
564 IF( lwork.LT.work_size_min ) THEN
565 IF( lwork.NE.-1 ) THEN
566 info = -14
567 CALL pxerbla( ictxt, 'PSPBTRSV: worksize error', -info )
568 END IF
569 RETURN
570 END IF
571*
572* Pack params and positions into arrays for global consistency check
573*
574 param_check( 17, 1 ) = descb( 5 )
575 param_check( 16, 1 ) = descb( 4 )
576 param_check( 15, 1 ) = descb( 3 )
577 param_check( 14, 1 ) = descb( 2 )
578 param_check( 13, 1 ) = descb( 1 )
579 param_check( 12, 1 ) = ib
580 param_check( 11, 1 ) = desca( 5 )
581 param_check( 10, 1 ) = desca( 4 )
582 param_check( 9, 1 ) = desca( 3 )
583 param_check( 8, 1 ) = desca( 1 )
584 param_check( 7, 1 ) = ja
585 param_check( 6, 1 ) = nrhs
586 param_check( 5, 1 ) = bw
587 param_check( 4, 1 ) = n
588 param_check( 3, 1 ) = idum3
589 param_check( 2, 1 ) = idum2
590 param_check( 1, 1 ) = idum1
591*
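*     The second column of PARAM_CHECK records each parameter's
*     position for error reporting: values of the form i*100+j refer
*     to entry j of the descriptor passed as argument i (e.g. 1105 is
*     DESCB entry 5), matching the INFO convention described above.
*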
592 param_check( 17, 2 ) = 1105
593 param_check( 16, 2 ) = 1104
594 param_check( 15, 2 ) = 1103
595 param_check( 14, 2 ) = 1102
596 param_check( 13, 2 ) = 1101
597 param_check( 12, 2 ) = 10
598 param_check( 11, 2 ) = 805
599 param_check( 10, 2 ) = 804
600 param_check( 9, 2 ) = 803
601 param_check( 8, 2 ) = 801
602 param_check( 7, 2 ) = 7
603 param_check( 6, 2 ) = 5
604 param_check( 5, 2 ) = 4
605 param_check( 4, 2 ) = 3
606 param_check( 3, 2 ) = 14
607 param_check( 2, 2 ) = 2
608 param_check( 1, 2 ) = 1
609*
610* Want to find errors with MIN( ), so if no error, set it to a big
611* number. If there already is an error, multiply by the
612* descriptor multiplier.
613*
614 IF( info.GE.0 ) THEN
615 info = bignum
616 ELSE IF( info.LT.-descmult ) THEN
617 info = -info
618 ELSE
619 info = -info*descmult
620 END IF
621*
622* Check consistency across processors
623*
624 CALL globchk( ictxt, 17, param_check, 17, param_check( 1, 3 ),
625 $ info )
626*
627* Prepare output: set info = 0 if no error, and divide by DESCMULT
628* if error is not in a descriptor entry.
629*
630 IF( info.EQ.bignum ) THEN
631 info = 0
632 ELSE IF( mod( info, descmult ).EQ.0 ) THEN
633 info = -info / descmult
634 ELSE
635 info = -info
636 END IF
637*
638 IF( info.LT.0 ) THEN
639 CALL pxerbla( ictxt, 'PSPBTRSV', -info )
640 RETURN
641 END IF
642*
643* Quick return if possible
644*
645 IF( n.EQ.0 )
646 $ RETURN
647*
648 IF( nrhs.EQ.0 )
649 $ RETURN
650*
651*
652* Adjust addressing into matrix space to properly get into
653* the beginning part of the relevant data
654*
655 part_offset = nb*( ( ja-1 ) / ( npcol*nb ) )
656*
657 IF( ( mycol-csrc ).LT.( ja-part_offset-1 ) / nb ) THEN
658 part_offset = part_offset + nb
659 END IF
660*
661 IF( mycol.LT.csrc ) THEN
662 part_offset = part_offset - nb
663 END IF
664*
665* Form a new BLACS grid (the "standard form" grid) with only procs
666* holding part of the matrix, of size 1xNP where NP is adjusted,
667* starting at csrc=0, with JA modified to reflect dropped procs.
668*
669* First processor to hold part of the matrix:
670*
671 first_proc = mod( ( ja-1 ) / nb+csrc, npcol )
672*
673* Calculate new JA while dropping off unused processors.
674*
675 ja_new = mod( ja-1, nb ) + 1
676*
677* Save and compute new value of NP
678*
679 np_save = np
680 np = ( ja_new+n-2 ) / nb + 1
681*
682* Call utility routine that forms "standard-form" grid
683*
684 CALL reshape( ictxt, int_one, ictxt_new, int_one, first_proc,
685 $ int_one, np )
686*
687* Use new context from standard grid as context.
688*
689 ictxt_save = ictxt
690 ictxt = ictxt_new
691 desca_1xp( 2 ) = ictxt_new
692 descb_px1( 2 ) = ictxt_new
693*
694* Get information about new grid.
695*
696 CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
697*
698* Drop out processors that do not have part of the matrix.
699*
700 IF( myrow.LT.0 ) THEN
701 GO TO 180
702 END IF
703*
704* ********************************
705* Values reused throughout routine
706*
707* User-input value of partition size
708*
709 part_size = nb
710*
711* Number of columns in each processor
712*
713 my_num_cols = numroc( n, part_size, mycol, 0, npcol )
714*
715* Offset in columns to beginning of main partition in each proc
716*
717 IF( mycol.EQ.0 ) THEN
718 part_offset = part_offset + mod( ja_new-1, part_size )
719 my_num_cols = my_num_cols - mod( ja_new-1, part_size )
720 END IF
721*
722* Offset in elements
723*
724 ofst = part_offset*llda
725*
726* Size of main (or odd) partition in each processor
727*
728 odd_size = my_num_cols
729 IF( mycol.LT.np-1 ) THEN
730 odd_size = odd_size - bw
731 END IF
732*
733*
734*
735* Begin main code
736*
737 IF( lsame( uplo, 'L' ) ) THEN
738*
739 IF( lsame( trans, 'N' ) ) THEN
740*
741* Frontsolve
742*
743*
744******************************************
745* Local computation phase
746******************************************
747*
748* Use main partition in each processor to solve locally
749*
750 CALL stbtrs( uplo, 'N', 'N', odd_size, bw, nrhs,
751 $ a( ofst+1 ), llda, b( part_offset+1 ), lldb,
752 $ info )
753*
754*
755 IF( mycol.LT.np-1 ) THEN
756* Use factorization of odd-even connection block to modify
757* locally stored portion of right hand side(s)
758*
759*
760* First copy and multiply it into temporary storage,
761* then use it on RHS
762*
763 CALL slamov( 'N', bw, nrhs,
764 $ b( part_offset+odd_size-bw+1 ), lldb,
765 $ work( 1 ), bw )
766*
767 CALL strmm( 'L', 'U', 'N', 'N', bw, nrhs, -one,
768 $ a( ( ofst+( bw+1 )+( odd_size-bw )*llda ) ),
769 $ llda-1, work( 1 ), bw )
770*
771 CALL smatadd( bw, nrhs, one, work( 1 ), bw, one,
772 $ b( part_offset+odd_size+1 ), lldb )
773*
774 END IF
775*
776*
777 IF( mycol.NE.0 ) THEN
778* Use the "spike" fillin to calculate contribution to previous
779* processor's right hand side.
780*
781 CALL sgemm( 'T', 'N', bw, nrhs, odd_size, -one, af( 1 ),
782 $ odd_size, b( part_offset+1 ), lldb, zero,
783 $ work( 1+bw-bw ), bw )
784 END IF
785*
786*
787************************************************
788* Formation and solution of reduced system
789************************************************
790*
791*
792* Send modifications to prior processor's right hand sides
793*
794 IF( mycol.GT.0 ) THEN
795*
796 CALL sgesd2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
797 $ mycol-1 )
798*
799 END IF
800*
801* Receive modifications to processor's right hand sides
802*
803 IF( mycol.LT.npcol-1 ) THEN
804*
805 CALL sgerv2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
806 $ mycol+1 )
807*
808* Combine contribution to locally stored right hand sides
809*
810 CALL smatadd( bw, nrhs, one, work( 1 ), bw, one,
811 $ b( part_offset+odd_size+1 ), lldb )
812*
813 END IF
814*
815*
816* The last processor does not participate in the solution of the
817* reduced system, having sent its contribution already.
818 IF( mycol.EQ.npcol-1 ) THEN
819 GO TO 30
820 END IF
821*
822*
823* *************************************
824* Modification Loop
825*
826* The distance for sending and receiving for each level starts
827* at 1 for the first level.
828 level_dist = 1
829*
830* Do until this proc is needed to modify other procs' equations
831*
832 10 CONTINUE
833 IF( mod( ( mycol+1 ) / level_dist, 2 ).NE.0 )
834 $ GO TO 20
835*
836* Receive and add contribution to right hand sides from left
837*
838 IF( mycol-level_dist.GE.0 ) THEN
839*
840 CALL sgerv2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
841 $ mycol-level_dist )
842*
843 CALL smatadd( bw, nrhs, one, work( 1 ), bw, one,
844 $ b( part_offset+odd_size+1 ), lldb )
845*
846 END IF
847*
848* Receive and add contribution to right hand sides from right
849*
850 IF( mycol+level_dist.LT.npcol-1 ) THEN
851*
852 CALL sgerv2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
853 $ mycol+level_dist )
854*
855 CALL smatadd( bw, nrhs, one, work( 1 ), bw, one,
856 $ b( part_offset+odd_size+1 ), lldb )
857*
858 END IF
859*
860 level_dist = level_dist*2
861*
862 GO TO 10
863 20 CONTINUE
864* [End of GOTO Loop]
865*
866*
867*
868* *********************************
869* Calculate and use this proc's blocks to modify other procs
870*
871* Solve with diagonal block
872*
873 CALL strtrs( 'L', 'N', 'N', bw, nrhs,
874 $ af( odd_size*bw+mbw2+1 ), bw,
875 $ b( part_offset+odd_size+1 ), lldb, info )
876*
877 IF( info.NE.0 ) THEN
878 GO TO 170
879 END IF
880*
881*
882*
883* *********
884 IF( mycol / level_dist.LE.( npcol-1 ) / level_dist-2 ) THEN
885*
886* Calculate contribution from this block to next diagonal block
887*
888 CALL sgemm( 'T', 'N', bw, nrhs, bw, -one,
889 $ af( ( odd_size )*bw+1 ), bw,
890 $ b( part_offset+odd_size+1 ), lldb, zero,
891 $ work( 1 ), bw )
892*
893* Send contribution to diagonal block's owning processor.
894*
895 CALL sgesd2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
896 $ mycol+level_dist )
897*
898 END IF
899* End of "if( mycol/level_dist .le. (npcol-1)/level_dist-2 )..."
900*
901* ************
902 IF( ( mycol / level_dist.GT.0 ) .AND.
903 $ ( mycol / level_dist.LE.( npcol-1 ) / level_dist-1 ) )
904 $ THEN
905*
906*
907* Use offdiagonal block to calculate modification to diag block
908* of processor to the left
909*
910 CALL sgemm( 'N', 'N', bw, nrhs, bw, -one,
911 $ af( odd_size*bw+2*mbw2+1 ), bw,
912 $ b( part_offset+odd_size+1 ), lldb, zero,
913 $ work( 1 ), bw )
914*
915* Send contribution to diagonal block's owning processor.
916*
917 CALL sgesd2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
918 $ mycol-level_dist )
919*
920 END IF
921* End of "if( mycol/level_dist.le. (npcol-1)/level_dist -1 )..."
922*
923 30 CONTINUE
924*
925 ELSE
926*
927******************** BACKSOLVE *************************************
928*
929********************************************************************
930* .. Begin reduced system phase of algorithm ..
931********************************************************************
932*
933*
934*
935* The last processor does not participate in the solution of the
936* reduced system and just waits to receive its solution.
937 IF( mycol.EQ.npcol-1 ) THEN
938 GO TO 80
939 END IF
940*
941* Determine number of steps in tree loop
942*
943 level_dist = 1
944 40 CONTINUE
945 IF( mod( ( mycol+1 ) / level_dist, 2 ).NE.0 )
946 $ GO TO 50
947*
948 level_dist = level_dist*2
949*
950 GO TO 40
951 50 CONTINUE
952*
953*
954 IF( ( mycol / level_dist.GT.0 ) .AND.
955 $ ( mycol / level_dist.LE.( npcol-1 ) / level_dist-1 ) )
956 $ THEN
957*
958* Receive solution from processor to left
959*
960 CALL sgerv2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
961 $ mycol-level_dist )
962*
963*
964* Use offdiagonal block to calculate modification to RHS stored
965* on this processor
966*
967 CALL sgemm( 'T', 'N', bw, nrhs, bw, -one,
968 $ af( odd_size*bw+2*mbw2+1 ), bw, work( 1 ),
969 $ bw, one, b( part_offset+odd_size+1 ), lldb )
970 END IF
971* End of "if( mycol/level_dist.le. (npcol-1)/level_dist -1 )..."
972*
973*
974 IF( mycol / level_dist.LE.( npcol-1 ) / level_dist-2 ) THEN
975*
976* Receive solution from processor to right
977*
978 CALL sgerv2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
979 $ mycol+level_dist )
980*
981* Calculate contribution from this block to next diagonal block
982*
983 CALL sgemm( 'N', 'N', bw, nrhs, bw, -one,
984 $ af( ( odd_size )*bw+1 ), bw, work( 1 ), bw,
985 $ one, b( part_offset+odd_size+1 ), lldb )
986*
987 END IF
988* End of "if( mycol/level_dist .le. (npcol-1)/level_dist-2 )..."
989*
990*
991* Solve with diagonal block
992*
993 CALL strtrs( 'L', 'T', 'N', bw, nrhs,
994 $ af( odd_size*bw+mbw2+1 ), bw,
995 $ b( part_offset+odd_size+1 ), lldb, info )
996*
997 IF( info.NE.0 ) THEN
998 GO TO 170
999 END IF
1000*
1001*
1002*
1003***Modification Loop *******
1004*
1005 60 CONTINUE
1006 IF( level_dist.EQ.1 )
1007 $ GO TO 70
1008*
1009 level_dist = level_dist / 2
1010*
1011* Send solution to the right
1012*
1013 IF( mycol+level_dist.LT.npcol-1 ) THEN
1014*
1015 CALL sgesd2d( ictxt, bw, nrhs,
1016 $ b( part_offset+odd_size+1 ), lldb, 0,
1017 $ mycol+level_dist )
1018*
1019 END IF
1020*
1021* Send solution to left
1022*
1023 IF( mycol-level_dist.GE.0 ) THEN
1024*
1025 CALL sgesd2d( ictxt, bw, nrhs,
1026 $ b( part_offset+odd_size+1 ), lldb, 0,
1027 $ mycol-level_dist )
1028*
1029 END IF
1030*
1031 GO TO 60
1032 70 CONTINUE
1033* [End of GOTO Loop]
1034*
1035 80 CONTINUE
1036* [Processor npcol - 1 jumped to here to await next stage]
1037*
1038*******************************
1039* Reduced system has been solved, communicate solutions to nearest
1040* neighbors in preparation for local computation phase.
1041*
1042*
1043* Send elements of solution to next proc
1044*
1045 IF( mycol.LT.npcol-1 ) THEN
1046*
1047 CALL sgesd2d( ictxt, bw, nrhs,
1048 $ b( part_offset+odd_size+1 ), lldb, 0,
1049 $ mycol+1 )
1050*
1051 END IF
1052*
1053* Receive modifications to processor's right hand sides
1054*
1055 IF( mycol.GT.0 ) THEN
1056*
1057 CALL sgerv2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
1058 $ mycol-1 )
1059*
1060 END IF
1061*
1062*
1063*
1064**********************************************
1065* Local computation phase
1066**********************************************
1067*
1068 IF( mycol.NE.0 ) THEN
1069* Use the "spike" fillin to calculate contribution from previous
1070* processor's solution.
1071*
1072 CALL sgemm( 'N', 'N', odd_size, nrhs, bw, -one, af( 1 ),
1073 $ odd_size, work( 1+bw-bw ), bw, one,
1074 $ b( part_offset+1 ), lldb )
1075*
1076 END IF
1077*
1078*
1079 IF( mycol.LT.np-1 ) THEN
1080* Use factorization of odd-even connection block to modify
1081* locally stored portion of right hand side(s)
1082*
1083*
1084* First copy and multiply it into temporary storage,
1085* then use it on RHS
1086*
1087 CALL slamov( 'N', bw, nrhs, b( part_offset+odd_size+1 ),
1088 $ lldb, work( 1+bw-bw ), bw )
1089*
1090 CALL strmm( 'L', 'U', 'T', 'N', bw, nrhs, -one,
1091 $ a( ( ofst+( bw+1 )+( odd_size-bw )*llda ) ),
1092 $ llda-1, work( 1+bw-bw ), bw )
1093*
1094 CALL smatadd( bw, nrhs, one, work( 1+bw-bw ), bw, one,
1095 $ b( part_offset+odd_size-bw+1 ), lldb )
1096*
1097 END IF
1098*
1099* Use main partition in each processor to solve locally
1100*
1101 CALL stbtrs( uplo, 'T', 'N', odd_size, bw, nrhs,
1102 $ a( ofst+1 ), llda, b( part_offset+1 ), lldb,
1103 $ info )
1104*
1105 END IF
1106* End of "IF( LSAME( TRANS, 'N' ) )"...
1107*
1108*
1109 ELSE
1110***************************************************************
1111* CASE UPLO = 'U' *
1112***************************************************************
1113 IF( lsame( trans, 'T' ) ) THEN
1114*
1115* Frontsolve
1116*
1117*
1118******************************************
1119* Local computation phase
1120******************************************
1121*
1122* Use main partition in each processor to solve locally
1123*
1124 CALL stbtrs( uplo, 'T', 'N', odd_size, bw, nrhs,
1125 $ a( ofst+1 ), llda, b( part_offset+1 ), lldb,
1126 $ info )
1127*
1128*
1129 IF( mycol.LT.np-1 ) THEN
1130* Use factorization of odd-even connection block to modify
1131* locally stored portion of right hand side(s)
1132*
1133*
1134* First copy and multiply it into temporary storage,
1135* then use it on RHS
1136*
1137 CALL slamov( 'N', bw, nrhs,
1138 $ b( part_offset+odd_size-bw+1 ), lldb,
1139 $ work( 1 ), bw )
1140*
1141 CALL strmm( 'L', 'L', 'T', 'N', bw, nrhs, -one,
1142 $ a( ( ofst+1+odd_size*llda ) ), llda-1,
1143 $ work( 1 ), bw )
1144*
1145 CALL smatadd( bw, nrhs, one, work( 1 ), bw, one,
1146 $ b( part_offset+odd_size+1 ), lldb )
1147*
1148 END IF
1149*
1150*
1151 IF( mycol.NE.0 ) THEN
1152* Use the "spike" fillin to calculate contribution to previous
1153* processor's right hand side.
1154*
1155 CALL sgemm( 'T', 'N', bw, nrhs, odd_size, -one, af( 1 ),
1156 $ odd_size, b( part_offset+1 ), lldb, zero,
1157 $ work( 1+bw-bw ), bw )
1158 END IF
1159*
1160*
1161************************************************
1162* Formation and solution of reduced system
1163************************************************
1164*
1165*
1166* Send modifications to prior processor's right hand sides
1167*
1168 IF( mycol.GT.0 ) THEN
1169*
1170 CALL sgesd2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
1171 $ mycol-1 )
1172*
1173 END IF
1174*
1175* Receive modifications to processor's right hand sides
1176*
1177 IF( mycol.LT.npcol-1 ) THEN
1178*
1179 CALL sgerv2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
1180 $ mycol+1 )
1181*
1182* Combine contribution to locally stored right hand sides
1183*
1184 CALL smatadd( bw, nrhs, one, work( 1 ), bw, one,
1185 $ b( part_offset+odd_size+1 ), lldb )
1186*
1187 END IF
1188*
1189*
1190* The last processor does not participate in the solution of the
1191* reduced system, having sent its contribution already.
1192 IF( mycol.EQ.npcol-1 ) THEN
1193 GO TO 110
1194 END IF
1195*
1196*
1197* *************************************
1198* Modification Loop
1199*
1200* The distance for sending and receiving for each level starts
1201* at 1 for the first level.
1202 level_dist = 1
1203*
1204* Do until this proc is needed to modify other procs' equations
1205*
1206 90 CONTINUE
1207 IF( mod( ( mycol+1 ) / level_dist, 2 ).NE.0 )
1208 $ GO TO 100
1209*
1210* Receive and add contribution to right hand sides from left
1211*
1212 IF( mycol-level_dist.GE.0 ) THEN
1213*
1214 CALL sgerv2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
1215 $ mycol-level_dist )
1216*
1217 CALL smatadd( bw, nrhs, one, work( 1 ), bw, one,
1218 $ b( part_offset+odd_size+1 ), lldb )
1219*
1220 END IF
1221*
1222* Receive and add contribution to right hand sides from right
1223*
1224 IF( mycol+level_dist.LT.npcol-1 ) THEN
1225*
1226 CALL sgerv2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
1227 $ mycol+level_dist )
1228*
1229 CALL smatadd( bw, nrhs, one, work( 1 ), bw, one,
1230 $ b( part_offset+odd_size+1 ), lldb )
1231*
1232 END IF
1233*
1234 level_dist = level_dist*2
1235*
1236 GO TO 90
1237 100 CONTINUE
1238* [End of GOTO Loop]
1239*
1240*
1241*
1242* *********************************
1243* Calculate and use this proc's blocks to modify other procs
1244*
1245* Solve with diagonal block
1246*
1247 CALL strtrs( 'L', 'N', 'N', bw, nrhs,
1248 $ af( odd_size*bw+mbw2+1 ), bw,
1249 $ b( part_offset+odd_size+1 ), lldb, info )
1250*
1251 IF( info.NE.0 ) THEN
1252 GO TO 170
1253 END IF
1254*
1255*
1256*
1257* *********
1258 IF( mycol / level_dist.LE.( npcol-1 ) / level_dist-2 ) THEN
1259*
1260* Calculate contribution from this block to next diagonal block
1261*
1262 CALL sgemm( 'T', 'N', bw, nrhs, bw, -one,
1263 $ af( ( odd_size )*bw+1 ), bw,
1264 $ b( part_offset+odd_size+1 ), lldb, zero,
1265 $ work( 1 ), bw )
1266*
1267* Send contribution to diagonal block's owning processor.
1268*
1269 CALL sgesd2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
1270 $ mycol+level_dist )
1271*
1272 END IF
1273* End of "if( mycol/level_dist .le. (npcol-1)/level_dist-2 )..."
1274*
1275* ************
1276 IF( ( mycol / level_dist.GT.0 ) .AND.
1277 $ ( mycol / level_dist.LE.( npcol-1 ) / level_dist-1 ) )
1278 $ THEN
1279*
1280*
1281* Use offdiagonal block to calculate modification to diag block
1282* of processor to the left
1283*
1284 CALL sgemm( 'N', 'N', bw, nrhs, bw, -one,
1285 $ af( odd_size*bw+2*mbw2+1 ), bw,
1286 $ b( part_offset+odd_size+1 ), lldb, zero,
1287 $ work( 1 ), bw )
1288*
1289* Send contribution to diagonal block's owning processor.
1290*
1291 CALL sgesd2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
1292 $ mycol-level_dist )
1293*
1294 END IF
1295* End of "if( mycol/level_dist.le. (npcol-1)/level_dist -1 )..."
1296*
1297 110 CONTINUE
1298*
1299 ELSE
1300*
1301******************** BACKSOLVE *************************************
1302*
1303********************************************************************
1304* .. Begin reduced system phase of algorithm ..
1305********************************************************************
1306*
1307*
1308*
1309* The last processor does not participate in the solution of the
1310* reduced system and just waits to receive its solution.
1311 IF( mycol.EQ.npcol-1 ) THEN
1312 GO TO 160
1313 END IF
1314*
1315* Determine number of steps in tree loop
1316*
1317 level_dist = 1
1318 120 CONTINUE
1319 IF( mod( ( mycol+1 ) / level_dist, 2 ).NE.0 )
1320 $ GO TO 130
1321*
1322 level_dist = level_dist*2
1323*
1324 GO TO 120
1325 130 CONTINUE
1326*
1327*
1328 IF( ( mycol / level_dist.GT.0 ) .AND.
1329 $ ( mycol / level_dist.LE.( npcol-1 ) / level_dist-1 ) )
1330 $ THEN
1331*
1332* Receive solution from processor to left
1333*
1334 CALL sgerv2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
1335 $ mycol-level_dist )
1336*
1337*
1338* Use offdiagonal block to calculate modification to RHS stored
1339* on this processor
1340*
1341 CALL sgemm( 'T', 'N', bw, nrhs, bw, -one,
1342 $ af( odd_size*bw+2*mbw2+1 ), bw, work( 1 ),
1343 $ bw, one, b( part_offset+odd_size+1 ), lldb )
1344 END IF
1345* End of "if( mycol/level_dist.le. (npcol-1)/level_dist -1 )..."
1346*
1347*
1348 IF( mycol / level_dist.LE.( npcol-1 ) / level_dist-2 ) THEN
1349*
1350* Receive solution from processor to right
1351*
1352 CALL sgerv2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
1353 $ mycol+level_dist )
1354*
1355* Calculate contribution from this block to next diagonal block
1356*
1357 CALL sgemm( 'N', 'N', bw, nrhs, bw, -one,
1358 $ af( ( odd_size )*bw+1 ), bw, work( 1 ), bw,
1359 $ one, b( part_offset+odd_size+1 ), lldb )
1360*
1361 END IF
1362* End of "if( mycol/level_dist .le. (npcol-1)/level_dist-2 )..."
1363*
1364*
1365* Solve with diagonal block
1366*
1367 CALL strtrs( 'L', 'T', 'N', bw, nrhs,
1368 $ af( odd_size*bw+mbw2+1 ), bw,
1369 $ b( part_offset+odd_size+1 ), lldb, info )
1370*
1371 IF( info.NE.0 ) THEN
1372 GO TO 170
1373 END IF
1374*
1375*
1376*
1377***Modification Loop *******
1378*
1379 140 CONTINUE
1380 IF( level_dist.EQ.1 )
1381 $ GO TO 150
1382*
1383 level_dist = level_dist / 2
1384*
1385* Send solution to the right
1386*
1387 IF( mycol+level_dist.LT.npcol-1 ) THEN
1388*
1389 CALL sgesd2d( ictxt, bw, nrhs,
1390 $ b( part_offset+odd_size+1 ), lldb, 0,
1391 $ mycol+level_dist )
1392*
1393 END IF
1394*
1395* Send solution to left
1396*
1397 IF( mycol-level_dist.GE.0 ) THEN
1398*
1399 CALL sgesd2d( ictxt, bw, nrhs,
1400 $ b( part_offset+odd_size+1 ), lldb, 0,
1401 $ mycol-level_dist )
1402*
1403 END IF
1404*
1405 GO TO 140
1406 150 CONTINUE
1407* [End of GOTO Loop]
1408*
1409 160 CONTINUE
1410* [Processor npcol - 1 jumped to here to await next stage]
1411*
1412*******************************
1413* Reduced system has been solved, communicate solutions to nearest
1414* neighbors in preparation for local computation phase.
1415*
1416*
1417* Send elements of solution to next proc
1418*
1419 IF( mycol.LT.npcol-1 ) THEN
1420*
1421 CALL sgesd2d( ictxt, bw, nrhs,
1422 $ b( part_offset+odd_size+1 ), lldb, 0,
1423 $ mycol+1 )
1424*
1425 END IF
1426*
1427* Receive modifications to processor's right hand sides
1428*
1429 IF( mycol.GT.0 ) THEN
1430*
1431 CALL sgerv2d( ictxt, bw, nrhs, work( 1 ), bw, 0,
1432 $ mycol-1 )
1433*
1434 END IF
1435*
1436*
1437*
1438**********************************************
1439* Local computation phase
1440**********************************************
1441*
1442 IF( mycol.NE.0 ) THEN
1443* Use the "spike" fillin to calculate contribution from previous
1444* processor's solution.
1445*
1446 CALL sgemm( 'N', 'N', odd_size, nrhs, bw, -one, af( 1 ),
1447 $ odd_size, work( 1+bw-bw ), bw, one,
1448 $ b( part_offset+1 ), lldb )
1449*
1450 END IF
1451*
1452*
1453 IF( mycol.LT.np-1 ) THEN
1454* Use factorization of odd-even connection block to modify
1455* locally stored portion of right hand side(s)
1456*
1457*
1458* First copy and multiply it into temporary storage,
1459* then use it on RHS
1460*
1461 CALL slamov( 'N', bw, nrhs, b( part_offset+odd_size+1 ),
1462 $ lldb, work( 1+bw-bw ), bw )
1463*
1464 CALL strmm( 'L', 'L', 'N', 'N', bw, nrhs, -one,
1465 $ a( ( ofst+1+odd_size*llda ) ), llda-1,
1466 $ work( 1+bw-bw ), bw )
1467*
1468 CALL smatadd( bw, nrhs, one, work( 1+bw-bw ), bw, one,
1469 $ b( part_offset+odd_size-bw+1 ), lldb )
1470*
1471 END IF
1472*
1473* Use main partition in each processor to solve locally
1474*
1475 CALL stbtrs( uplo, 'N', 'N', odd_size, bw, nrhs,
1476 $ a( ofst+1 ), llda, b( part_offset+1 ), lldb,
1477 $ info )
1478*
1479 END IF
1480* End of "IF( LSAME( TRANS, 'N' ) )"...
1481*
1482*
1483 END IF
1484* End of "IF( LSAME( UPLO, 'L' ) )"...
1485 170 CONTINUE
1486*
1487*
1488* Free BLACS space used to hold standard-form grid.
1489*
1490 IF( ictxt_save.NE.ictxt_new ) THEN
1491 CALL blacs_gridexit( ictxt_new )
1492 END IF
1493*
1494 180 CONTINUE
1495*
1496* Restore saved input parameters
1497*
1498 ictxt = ictxt_save
1499 np = np_save
1500*
1501* Output minimum worksize
1502*
1503 work( 1 ) = work_size_min
1504*
1505*
1506 RETURN
1507*
1508* End of PSPBTRSV
1509*
1510 END
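
The sketch below is not part of the ScaLAPACK source; it is a minimal, illustrative driver showing how PSPBTRSV is typically reached: PSPBTRF factors the symmetric positive definite band matrix, and the two triangular solves are then applied (PSPBTRS normally performs this pair of calls internally). The grid shape (1 x 2), the problem sizes N, BW, NB and NRHS, the test data, and the workspace sizes are assumptions chosen only to satisfy the restrictions documented in the header (NB >= 2*BW, JA = IB, at most one block per process); it is meant to be run on exactly two processes.

      PROGRAM PSPBTRSV_EX
*
*     Illustrative only: factor a diagonally dominant (hence s.p.d.)
*     band matrix with PSPBTRF, then solve A*X = B by two calls to
*     PSPBTRSV.  Sizes, data and workspace are placeholder assumptions.
*
      INTEGER            N, BW, NB, NRHS, LAF, LWORK
      PARAMETER          ( N = 1000, BW = 4, NB = 500, NRHS = 1 )
      PARAMETER          ( LAF = ( NB+2*BW )*BW )
      PARAMETER          ( LWORK = BW*BW + BW*NRHS )
      INTEGER            ICTXT, NPROW, NPCOL, MYROW, MYCOL, INFO, I, J
      INTEGER            DESCA( 7 ), DESCB( 7 )
      REAL               A( BW+1, NB ), B( NB, NRHS )
      REAL               AF( LAF ), WORK( LWORK )
      EXTERNAL           BLACS_GET, BLACS_GRIDINIT, BLACS_GRIDINFO,
     $                   BLACS_GRIDEXIT, BLACS_EXIT, PSPBTRF, PSPBTRSV
*
*     One-dimensional 1 x 2 process grid (run on exactly 2 processes).
*
      CALL BLACS_GET( -1, 0, ICTXT )
      CALL BLACS_GRIDINIT( ICTXT, 'R', 1, 2 )
      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*
*     Type 501 (1 x P) descriptor for the band matrix A and
*     type 502 (P x 1) descriptor for the right hand sides B.
*
      DESCA( 1 ) = 501
      DESCA( 2 ) = ICTXT
      DESCA( 3 ) = N
      DESCA( 4 ) = NB
      DESCA( 5 ) = 0
      DESCA( 6 ) = BW + 1
      DESCA( 7 ) = 0
      DESCB( 1 ) = 502
      DESCB( 2 ) = ICTXT
      DESCB( 3 ) = N
      DESCB( 4 ) = NB
      DESCB( 5 ) = 0
      DESCB( 6 ) = NB
      DESCB( 7 ) = 0
*
*     Local data: upper band storage (UPLO = 'U'), -1 on the BW
*     superdiagonals and 2*BW+2 on the diagonal; right hand side of
*     all ones.  Band entries outside the matrix are not referenced.
*
      DO 20 J = 1, NB
         DO 10 I = 1, BW
            A( I, J ) = -1.0E0
   10    CONTINUE
         A( BW+1, J ) = REAL( 2*BW+2 )
   20 CONTINUE
      DO 30 I = 1, NB
         B( I, 1 ) = 1.0E0
   30 CONTINUE
*
*     Factor, then forward solve with U^T and backward solve with U
*     (the order assumed here is the one PSPBTRS applies for 'U').
*
      CALL PSPBTRF( 'U', N, BW, A, 1, DESCA, AF, LAF, WORK, LWORK,
     $              INFO )
      IF( INFO.EQ.0 ) THEN
         CALL PSPBTRSV( 'U', 'T', N, BW, NRHS, A, 1, DESCA, B, 1,
     $                  DESCB, AF, LAF, WORK, LWORK, INFO )
         IF( INFO.EQ.0 )
     $      CALL PSPBTRSV( 'U', 'N', N, BW, NRHS, A, 1, DESCA, B, 1,
     $                     DESCB, AF, LAF, WORK, LWORK, INFO )
      END IF
*
      IF( MYCOL.EQ.0 )
     $   WRITE( *, * ) 'PSPBTRSV example finished, INFO = ', INFO
*
      CALL BLACS_GRIDEXIT( ICTXT )
      CALL BLACS_EXIT( 0 )
      END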