SCALAPACK 2.2.2
pzgetrf.f
      SUBROUTINE pzgetrf( M, N, A, IA, JA, DESCA, IPIV, INFO )
*
*  -- ScaLAPACK routine (version 1.7) --
*     University of Tennessee, Knoxville, Oak Ridge National Laboratory,
*     and University of California, Berkeley.
*     May 25, 2001
*
*     .. Scalar Arguments ..
      INTEGER            IA, INFO, JA, M, N
*     ..
*     .. Array Arguments ..
      INTEGER            DESCA( * ), IPIV( * )
      COMPLEX*16         A( * )
*     ..
*
*  Purpose
*  =======
*
*  PZGETRF computes an LU factorization of a general M-by-N distributed
*  matrix sub( A ) = A(IA:IA+M-1,JA:JA+N-1) using partial pivoting with
*  row interchanges.
*
*  The factorization has the form sub( A ) = P * L * U, where P is a
*  permutation matrix, L is lower triangular with unit diagonal
*  elements (lower trapezoidal if M > N), and U is upper triangular
*  (upper trapezoidal if M < N).  L and U are stored in sub( A ).
*
*  This is the right-looking Parallel Level 3 BLAS version of the
*  algorithm.
*
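*  In outline, each iteration of the blocked algorithm factors a panel
*  of at most NB_A columns ( PZGETF2 ), applies the resulting row
*  interchanges to the columns outside the panel ( PZLASWP ), solves a
*  triangular system for the corresponding block row of U ( PZTRSM ),
*  and applies a rank-JB update to the trailing submatrix ( PZGEMM );
*  see the executable statements below.
*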
*  Notes
*  =====
*
*  Each global data object is described by an associated description
*  vector.  This vector stores the information required to establish
*  the mapping between an object element and its corresponding process
*  and memory location.
*
*  Let A be a generic term for any 2D block cyclically distributed
*  array.  Such a global array has an associated description vector
*  DESCA.  In the following comments, the character _ should be read
*  as "of the global array".
*
*  NOTATION        STORED IN       EXPLANATION
*  --------------- --------------- -------------------------------------
*  DTYPE_A(global) DESCA( DTYPE_ ) The descriptor type.  In this case,
*                                  DTYPE_A = 1.
*  CTXT_A (global) DESCA( CTXT_ )  The BLACS context handle, indicating
*                                  the BLACS process grid A is
*                                  distributed over.  The context itself
*                                  is global, but the handle (the
*                                  integer value) may vary.
*  M_A    (global) DESCA( M_ )     The number of rows in the global
*                                  array A.
*  N_A    (global) DESCA( N_ )     The number of columns in the global
*                                  array A.
*  MB_A   (global) DESCA( MB_ )    The blocking factor used to
*                                  distribute the rows of the array.
*  NB_A   (global) DESCA( NB_ )    The blocking factor used to
*                                  distribute the columns of the array.
*  RSRC_A (global) DESCA( RSRC_ )  The process row over which the first
*                                  row of the array A is distributed.
*  CSRC_A (global) DESCA( CSRC_ )  The process column over which the
*                                  first column of the array A is
*                                  distributed.
*  LLD_A  (local)  DESCA( LLD_ )   The leading dimension of the local
*                                  array.  LLD_A >= MAX(1,LOCr(M_A)).
*
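*  As an illustration (not part of this routine), a descriptor of this
*  form is typically initialized with the ScaLAPACK tool routine
*  DESCINIT; the grid and size variables below are placeholders:
*
*     CALL DESCINIT( DESCA, M, N, MB, NB, RSRC, CSRC, ICTXT, LLDA,
*    $               INFO )
*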
*  Let K be the number of rows or columns of a distributed matrix, and
*  assume that its process grid has dimension p x q.
*  LOCr( K ) denotes the number of elements of K that a process would
*  receive if K were distributed over the p processes of its process
*  column.
*  Similarly, LOCc( K ) denotes the number of elements of K that a
*  process would receive if K were distributed over the q processes of
*  its process row.
*  The values of LOCr() and LOCc() may be determined via a call to the
*  ScaLAPACK tool function, NUMROC:
*          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
*          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
*  An upper bound for these quantities may be computed by:
*          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
*          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
*
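*  For example, with M = 1000, MB_A = 64 and NPROW = 4, the bound gives
*          LOCr( M ) <= ceil( ceil(1000/64)/4 )*64 = ceil( 16/4 )*64
*                     = 256
*  local rows on each process row.
*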
*  This routine requires square block decomposition ( MB_A = NB_A ).
*
*  Arguments
*  =========
*
*  M       (global input) INTEGER
*          The number of rows to be operated on, i.e. the number of
*          rows of the distributed submatrix sub( A ).  M >= 0.
*
*  N       (global input) INTEGER
*          The number of columns to be operated on, i.e. the number of
*          columns of the distributed submatrix sub( A ).  N >= 0.
*
*  A       (local input/local output) COMPLEX*16 pointer into the
*          local memory to an array of dimension (LLD_A, LOCc(JA+N-1)).
*          On entry, this array contains the local pieces of the M-by-N
*          distributed matrix sub( A ) to be factored.  On exit, this
*          array contains the local pieces of the factors L and U from
*          the factorization sub( A ) = P*L*U; the unit diagonal
*          elements of L are not stored.
*
*  IA      (global input) INTEGER
*          The row index in the global array A indicating the first
*          row of sub( A ).
*
*  JA      (global input) INTEGER
*          The column index in the global array A indicating the
*          first column of sub( A ).
*
*  DESCA   (global and local input) INTEGER array of dimension DLEN_.
*          The array descriptor for the distributed matrix A.
*
*  IPIV    (local output) INTEGER array, dimension ( LOCr(M_A)+MB_A )
*          This array contains the pivoting information.  IPIV(i) is
*          the global row with which local row i was swapped.  This
*          array is tied to the distributed matrix A.
*
*  INFO    (global output) INTEGER
*          = 0:  successful exit
*          < 0:  If the i-th argument is an array and the j-th entry
*                had an illegal value, then INFO = -(i*100+j); if the
*                i-th argument is a scalar and had an illegal value,
*                then INFO = -i.
*          > 0:  If INFO = K, U(IA+K-1,JA+K-1) is exactly zero.  The
*                factorization has been completed, but the factor U is
*                exactly singular, and division by zero will occur if
*                it is used to solve a system of equations.
*
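*          As an illustration of the encoding: DESCA is argument 6 and
*          CTXT_ is entry 2 of the descriptor, so INFO = -(600+2) =
*          -602 reports an illegal BLACS context handle in DESCA (see
*          the argument checks below).
*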
*  =====================================================================
*
*     .. Parameters ..
      INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
     $                   LLD_, MB_, M_, NB_, N_, RSRC_
      parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
     $           ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
     $           rsrc_ = 7, csrc_ = 8, lld_ = 9 )
      COMPLEX*16         ONE
      parameter( one = 1.0d+0 )
*     ..
*     .. Local Scalars ..
      CHARACTER          COLBTOP, COLCTOP, ROWBTOP
      INTEGER            I, ICOFF, ICTXT, IINFO, IN, IROFF, J, JB, JN,
     $                   MN, MYCOL, MYROW, NPCOL, NPROW
*     ..
*     .. Local Arrays ..
      INTEGER            IDUM1( 1 ), IDUM2( 1 )
*     ..
*     .. External Subroutines ..
      EXTERNAL           blacs_gridinfo, chk1mat, igamn2d, pchk1mat,
     $                   pb_topget, pb_topset, pxerbla, pzgemm, pzgetf2,
     $                   pzlaswp, pztrsm
*     ..
*     .. External Functions ..
      INTEGER            ICEIL
      EXTERNAL           iceil
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          min, mod
*     ..
*     .. Executable Statements ..
*
*     Get grid parameters
*
      ictxt = desca( ctxt_ )
      CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
*
*     Test the input parameters
*
      info = 0
      IF( nprow.EQ.-1 ) THEN
         info = -(600+ctxt_)
      ELSE
         CALL chk1mat( m, 1, n, 2, ia, ja, desca, 6, info )
         IF( info.EQ.0 ) THEN
            iroff = mod( ia-1, desca( mb_ ) )
            icoff = mod( ja-1, desca( nb_ ) )
            IF( iroff.NE.0 ) THEN
               info = -4
            ELSE IF( icoff.NE.0 ) THEN
               info = -5
            ELSE IF( desca( mb_ ).NE.desca( nb_ ) ) THEN
               info = -(600+nb_)
            END IF
         END IF
         CALL pchk1mat( m, 1, n, 2, ia, ja, desca, 6, 0, idum1,
     $                  idum2, info )
      END IF
*
      IF( info.NE.0 ) THEN
         CALL pxerbla( ictxt, 'PZGETRF', -info )
         RETURN
      END IF
*
*     Quick return if possible
*
      IF( desca( m_ ).EQ.1 ) THEN
         ipiv( 1 ) = 1
         RETURN
      ELSE IF( m.EQ.0 .OR. n.EQ.0 ) THEN
         RETURN
      END IF
*
*     Split-ring topology for the communication along process rows
*
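*     ( The row/column broadcast and combine topologies in effect on
*     entry are saved here and restored just before the final RETURN. )
*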
      CALL pb_topget( ictxt, 'Broadcast', 'Rowwise', rowbtop )
      CALL pb_topget( ictxt, 'Broadcast', 'Columnwise', colbtop )
      CALL pb_topget( ictxt, 'Combine', 'Columnwise', colctop )
      CALL pb_topset( ictxt, 'Broadcast', 'Rowwise', 'S-ring' )
      CALL pb_topset( ictxt, 'Broadcast', 'Columnwise', ' ' )
      CALL pb_topset( ictxt, 'Combine', 'Columnwise', ' ' )
*
*     Handle the first block of columns separately
*
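*     ( IN and JN are the global indices of the last row and column of
*     the first diagonal block; they are capped by IA+M-1 and JA+MN-1
*     when the submatrix is smaller than one full block. )
*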
      mn = min( m, n )
      in = min( iceil( ia, desca( mb_ ) )*desca( mb_ ), ia+m-1 )
      jn = min( iceil( ja, desca( nb_ ) )*desca( nb_ ), ja+mn-1 )
      jb = jn - ja + 1
*
*     Factor diagonal and subdiagonal blocks and test for exact
*     singularity.
*
      CALL pzgetf2( m, jb, a, ia, ja, desca, ipiv, info )
*
      IF( jb+1.LE.n ) THEN
*
*        Apply interchanges to columns JN+1:JA+N-1.
*
         CALL pzlaswp( 'Forward', 'Rows', n-jb, a, ia, jn+1, desca,
     $                 ia, in, ipiv )
*
*        Compute block row of U.
*
         CALL pztrsm( 'Left', 'Lower', 'No transpose', 'Unit', jb,
     $                n-jb, one, a, ia, ja, desca, a, ia, jn+1, desca )
*
         IF( jb+1.LE.m ) THEN
*
*           Update trailing submatrix.
*
            CALL pzgemm( 'No transpose', 'No transpose', m-jb, n-jb, jb,
     $                   -one, a, in+1, ja, desca, a, ia, jn+1, desca,
     $                   one, a, in+1, jn+1, desca )
*
         END IF
      END IF
*
*     Loop over the remaining blocks of columns.
*
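*     ( J is the global index of the first column of the current panel;
*     I = IA + J - JA is the matching global row index on the diagonal
*     of sub( A ). )
*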
      DO 10 j = jn+1, ja+mn-1, desca( nb_ )
         jb = min( mn-j+ja, desca( nb_ ) )
         i = ia + j - ja
*
*        Factor diagonal and subdiagonal blocks and test for exact
*        singularity.
*
         CALL pzgetf2( m-j+ja, jb, a, i, j, desca, ipiv, iinfo )
*
         IF( info.EQ.0 .AND. iinfo.GT.0 )
     $      info = iinfo + j - ja
*
*        Apply interchanges to columns JA:J-1.
*
         CALL pzlaswp( 'Forward', 'Rowwise', j-ja, a, ia, ja, desca,
     $                 i, i+jb-1, ipiv )
*
         IF( j-ja+jb+1.LE.n ) THEN
*
*           Apply interchanges to columns J+JB:JA+N-1.
*
            CALL pzlaswp( 'Forward', 'Rowwise', n-j-jb+ja, a, ia, j+jb,
     $                    desca, i, i+jb-1, ipiv )
*
*           Compute block row of U.
*
            CALL pztrsm( 'Left', 'Lower', 'No transpose', 'Unit', jb,
     $                   n-j-jb+ja, one, a, i, j, desca, a, i, j+jb,
     $                   desca )
*
            IF( j-ja+jb+1.LE.m ) THEN
*
*              Update trailing submatrix.
*
               CALL pzgemm( 'No transpose', 'No transpose', m-j-jb+ja,
     $                      n-j-jb+ja, jb, -one, a, i+jb, j, desca, a,
     $                      i, j+jb, desca, one, a, i+jb, j+jb, desca )
*
            END IF
         END IF
*
   10 CONTINUE
*
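*     Take the global minimum of INFO across all processes so every
*     process reports the smallest K with an exactly zero pivot; a
*     process with INFO = 0 contributes MN+1, which is mapped back to
*     zero after the reduction.
*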
      IF( info.EQ.0 )
     $   info = mn + 1
      CALL igamn2d( ictxt, 'Rowwise', ' ', 1, 1, info, 1, idum1, idum2,
     $              -1, -1, mycol )
      IF( info.EQ.mn+1 )
     $   info = 0
*
      CALL pb_topset( ictxt, 'Broadcast', 'Rowwise', rowbtop )
      CALL pb_topset( ictxt, 'Broadcast', 'Columnwise', colbtop )
      CALL pb_topset( ictxt, 'Combine', 'Columnwise', colctop )
*
      RETURN
*
*     End of PZGETRF
*
      END
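
A minimal calling sketch (not part of the library source; the program
name, grid shape, block size, matrix size, and test data below are
illustrative assumptions):

      PROGRAM EXLU
*     Factor a distributed 8-by-8 complex matrix on a 2x2 process grid.
      INTEGER            DLEN_, M, N, NB
      PARAMETER          ( DLEN_ = 9, M = 8, N = 8, NB = 2 )
      INTEGER            CTXT, DESCA( DLEN_ ), I, INFO, J, LLDA,
     $                   MYCOL, MYROW, NPCOL, NPROW
      INTEGER            IPIV( M+NB )
      COMPLEX*16         A( M*N )
      INTEGER            NUMROC
      EXTERNAL           NUMROC
      EXTERNAL           BLACS_EXIT, BLACS_GET, BLACS_GRIDEXIT,
     $                   BLACS_GRIDINFO, BLACS_GRIDINIT, DESCINIT,
     $                   PZELSET, PZGETRF
      INTRINSIC          DBLE, DCMPLX, MAX, MIN
*     Set up a 2x2 BLACS process grid (run with 4 processes).
      NPROW = 2
      NPCOL = 2
      CALL BLACS_GET( -1, 0, CTXT )
      CALL BLACS_GRIDINIT( CTXT, 'Row-major', NPROW, NPCOL )
      CALL BLACS_GRIDINFO( CTXT, NPROW, NPCOL, MYROW, MYCOL )
*     Build the descriptor; square blocks ( MB_A = NB_A ) as required.
      LLDA = MAX( 1, NUMROC( M, NB, MYROW, 0, NPROW ) )
      CALL DESCINIT( DESCA, M, N, NB, NB, 0, 0, CTXT, LLDA, INFO )
*     Fill the global matrix with A(i,j) = min(i,j), which is
*     nonsingular; PZELSET stores each entry on its owning process.
      DO 20 J = 1, N
         DO 10 I = 1, M
            CALL PZELSET( A, I, J, DESCA,
     $                    DCMPLX( DBLE( MIN( I, J ) ), 0.0D+0 ) )
   10    CONTINUE
   20 CONTINUE
*     Factor the whole matrix: IA = JA = 1.
      CALL PZGETRF( M, N, A, 1, 1, DESCA, IPIV, INFO )
      IF( MYROW.EQ.0 .AND. MYCOL.EQ.0 )
     $   WRITE( *, * ) 'PZGETRF INFO =', INFO
      CALL BLACS_GRIDEXIT( CTXT )
      CALL BLACS_EXIT( 0 )
      END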