ScaLAPACK 2.1
ScaLAPACK: Scalable Linear Algebra PACKage
pcpotrf.f
Go to the documentation of this file.
1  SUBROUTINE pcpotrf( UPLO, N, A, IA, JA, DESCA, INFO )
2 *
3 * -- ScaLAPACK routine (version 1.7) --
4 * University of Tennessee, Knoxville, Oak Ridge National Laboratory,
5 * and University of California, Berkeley.
6 * May 25, 2001
7 *
8 * .. Scalar Arguments ..
9  CHARACTER UPLO
10  INTEGER IA, INFO, JA, N
11 * ..
12 * .. Array Arguments ..
13  INTEGER DESCA( * )
14  COMPLEX A( * )
15 * ..
16 *
17 * Purpose
18 * =======
19 *
20 * PCPOTRF computes the Cholesky factorization of an N-by-N complex
21 * hermitian positive definite distributed matrix sub( A ) denoting
22 * A(IA:IA+N-1, JA:JA+N-1).
23 *
24 * The factorization has the form
25 *
26 * sub( A ) = U' * U , if UPLO = 'U', or
27 *
28 * sub( A ) = L * L', if UPLO = 'L',
29 *
30 * where U is an upper triangular matrix and L is lower triangular.
31 *
32 * Notes
33 * =====
34 *
35 * Each global data object is described by an associated description
36 * vector. This vector stores the information required to establish
37 * the mapping between an object element and its corresponding process
38 * and memory location.
39 *
40 * Let A be a generic term for any 2D block cyclicly distributed array.
41 * Such a global array has an associated description vector DESCA.
42 * In the following comments, the character _ should be read as
43 * "of the global array".
44 *
45 * NOTATION STORED IN EXPLANATION
46 * --------------- -------------- --------------------------------------
47 * DTYPE_A(global) DESCA( DTYPE_ )The descriptor type. In this case,
48 * DTYPE_A = 1.
49 * CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
50 * the BLACS process grid A is distribu-
51 * ted over. The context itself is glo-
52 * bal, but the handle (the integer
53 * value) may vary.
54 * M_A (global) DESCA( M_ ) The number of rows in the global
55 * array A.
56 * N_A (global) DESCA( N_ ) The number of columns in the global
57 * array A.
58 * MB_A (global) DESCA( MB_ ) The blocking factor used to distribute
59 * the rows of the array.
60 * NB_A (global) DESCA( NB_ ) The blocking factor used to distribute
61 * the columns of the array.
62 * RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
63 * row of the array A is distributed.
64 * CSRC_A (global) DESCA( CSRC_ ) The process column over which the
65 * first column of the array A is
66 * distributed.
67 * LLD_A (local) DESCA( LLD_ ) The leading dimension of the local
68 * array. LLD_A >= MAX(1,LOCr(M_A)).
69 *
70 * Let K be the number of rows or columns of a distributed matrix,
71 * and assume that its process grid has dimension p x q.
72 * LOCr( K ) denotes the number of elements of K that a process
73 * would receive if K were distributed over the p processes of its
74 * process column.
75 * Similarly, LOCc( K ) denotes the number of elements of K that a
76 * process would receive if K were distributed over the q processes of
77 * its process row.
78 * The values of LOCr() and LOCc() may be determined via a call to the
79 * ScaLAPACK tool function, NUMROC:
80 * LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
81 * LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
82 * An upper bound for these quantities may be computed by:
83 * LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
84 * LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
85 *
86 * This routine requires square block decomposition ( MB_A = NB_A ).
87 *
88 * Arguments
89 * =========
90 *
91 * UPLO (global input) CHARACTER
92 * = 'U': Upper triangle of sub( A ) is stored;
93 * = 'L': Lower triangle of sub( A ) is stored.
94 *
95 * N (global input) INTEGER
96 * The number of rows and columns to be operated on, i.e. the
97 * order of the distributed submatrix sub( A ). N >= 0.
98 *
99 * A (local input/local output) COMPLEX pointer into the
100 * local memory to an array of dimension (LLD_A, LOCc(JA+N-1)).
101 * On entry, this array contains the local pieces of the
102 * N-by-N Hermitian distributed matrix sub( A ) to be factored.
103 * If UPLO = 'U', the leading N-by-N upper triangular part of
104 * sub( A ) contains the upper triangular part of the matrix,
105 * and its strictly lower triangular part is not referenced.
106 * If UPLO = 'L', the leading N-by-N lower triangular part of
107 * sub( A ) contains the lower triangular part of the distribu-
108 * ted matrix, and its strictly upper triangular part is not
109 * referenced. On exit, if UPLO = 'U', the upper triangular
110 * part of the distributed matrix contains the Cholesky factor
111 * U, if UPLO = 'L', the lower triangular part of the distribu-
112 * ted matrix contains the Cholesky factor L.
113 *
114 * IA (global input) INTEGER
115 * The row index in the global array A indicating the first
116 * row of sub( A ).
117 *
118 * JA (global input) INTEGER
119 * The column index in the global array A indicating the
120 * first column of sub( A ).
121 *
122 * DESCA (global and local input) INTEGER array of dimension DLEN_.
123 * The array descriptor for the distributed matrix A.
124 *
125 * INFO (global output) INTEGER
126 * = 0: successful exit
127 * < 0: If the i-th argument is an array and the j-entry had
128 * an illegal value, then INFO = -(i*100+j), if the i-th
129 * argument is a scalar and had an illegal value, then
130 * INFO = -i.
131 * > 0: If INFO = K, the leading minor of order K,
132 * A(IA:IA+K-1,JA:JA+K-1) is not positive definite, and
133 * the factorization could not be completed.
134 *
135 * =====================================================================
136 *
137 * .. Parameters ..
138  INTEGER BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
139  $ LLD_, MB_, M_, NB_, N_, RSRC_
140  parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
141  $ ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
142  $ rsrc_ = 7, csrc_ = 8, lld_ = 9 )
143  REAL ONE
144  parameter( one = 1.0e+0 )
145  COMPLEX CONE
146  parameter( cone = ( 1.0e+0, 0.0e+0 ) )
147 * ..
148 * .. Local Scalars ..
149  LOGICAL UPPER
150  CHARACTER COLBTOP, ROWBTOP
151  INTEGER I, ICOFF, ICTXT, IROFF, J, JB, JN, MYCOL,
152  $ MYROW, NPCOL, NPROW
153 * ..
154 * .. Local Arrays ..
155  INTEGER IDUM1( 1 ), IDUM2( 1 )
156 * ..
157 * .. External Subroutines ..
158  EXTERNAL blacs_gridinfo, chk1mat, pchk1mat, pb_topget,
159  $ pb_topset, pcpotf2, pcherk, pctrsm,
160  $ pxerbla
161 * ..
162 * .. External Functions ..
163  LOGICAL LSAME
164  INTEGER ICEIL
165  EXTERNAL iceil, lsame
166 * ..
167 * .. Intrinsic Functions ..
168  INTRINSIC ichar, min, mod
169 * ..
170 * .. Executable Statements ..
171 *
172 * Get grid parameters
173 *
174  ictxt = desca( ctxt_ )
175  CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
176 *
177 * Test the input parameters
178 *
*    NPROW = -1 signals an invalid BLACS context handle in DESCA.
179  info = 0
180  IF( nprow.EQ.-1 ) THEN
181  info = -(600+ctxt_)
182  ELSE
183  CALL chk1mat( n, 2, n, 2, ia, ja, desca, 6, info )
184  upper = lsame( uplo, 'U' )
185  IF( info.EQ.0 ) THEN
*    IROFF/ICOFF measure the offset of sub( A ) within its
*    distribution block: this routine requires IA-1 and JA-1 to be
*    multiples of MB_A/NB_A (block-aligned submatrix) and square
*    blocks ( MB_A = NB_A ).
186  iroff = mod( ia-1, desca( mb_ ) )
187  icoff = mod( ja-1, desca( nb_ ) )
188  IF ( .NOT.upper .AND. .NOT.lsame( uplo, 'L' ) ) THEN
189  info = -1
190  ELSE IF( iroff.NE.0 ) THEN
191  info = -4
192  ELSE IF( icoff.NE.0 ) THEN
193  info = -5
194  ELSE IF( desca( mb_ ).NE.desca( nb_ ) ) THEN
195  info = -(600+nb_)
196  END IF
197  END IF
*    Global consistency check of UPLO across the process grid.
198  IF( upper ) THEN
199  idum1( 1 ) = ichar( 'U' )
200  ELSE
201  idum1( 1 ) = ichar( 'L' )
202  END IF
203  idum2( 1 ) = 1
204  CALL pchk1mat( n, 2, n, 2, ia, ja, desca, 6, 1, idum1, idum2,
205  $ info )
206  END IF
207 *
208  IF( info.NE.0 ) THEN
209  CALL pxerbla( ictxt, 'PCPOTRF', -info )
210  RETURN
211  END IF
212 *
213 * Quick return if possible
214 *
215  IF( n.EQ.0 )
216  $ RETURN
217 *
*    Save the current broadcast topologies so they can be restored
*    at the common exit point (label 30).
218  CALL pb_topget( ictxt, 'Broadcast', 'Rowwise', rowbtop )
219  CALL pb_topget( ictxt, 'Broadcast', 'Columnwise', colbtop )
220 *
221  IF( upper ) THEN
222 *
223 * Split-ring topology for the communication along process
224 * columns, 1-tree topology along process rows.
225 *
226  CALL pb_topset( ictxt, 'Broadcast', 'Rowwise', ' ' )
227  CALL pb_topset( ictxt, 'Broadcast', 'Columnwise', 'S-ring' )
228 *
229 * A is upper triangular, compute Cholesky factorization A = U'*U.
230 *
231 * Handle the first block of columns separately
232 *
*    JN is the global column index of the last column of the first
*    block and JB its width; since JA is block-aligned (checked
*    above), JB = NB_A unless N < NB_A.
233  jn = min( iceil( ja, desca( nb_ ) )*desca(nb_), ja+n-1 )
234  jb = jn - ja + 1
235 *
236 * Perform unblocked Cholesky factorization on JB block
237 *
238  CALL pcpotf2( uplo, jb, a, ia, ja, desca, info )
239  IF( info.NE.0 )
240  $ GO TO 30
241 *
242  IF( jb+1.LE.n ) THEN
243 *
244 * Form the row panel of U using the triangular solver
245 *
246  CALL pctrsm( 'Left', uplo, 'Conjugate transpose',
247  $ 'Non-Unit', jb, n-jb, cone, a, ia, ja, desca,
248  $ a, ia, ja+jb, desca )
249 *
250 * Update the trailing matrix, A = A - U'*U
251 *
252  CALL pcherk( uplo, 'Conjugate transpose', n-jb, jb, -one, a,
253  $ ia, ja+jb, desca, one, a, ia+jb, ja+jb, desca )
254  END IF
255 *
256 * Loop over remaining block of columns
257 *
258  DO 10 j = jn+1, ja+n-1, desca( nb_ )
259  jb = min( n-j+ja, desca( nb_ ) )
260  i = ia + j - ja
261 *
262 * Perform unblocked Cholesky factorization on JB block
263 *
264  CALL pcpotf2( uplo, jb, a, i, j, desca, info )
265  IF( info.NE.0 ) THEN
*    PCPOTF2 reported a non-positive-definite minor local to this
*    diagonal block; add the block offset J-JA so INFO is the
*    order of the global leading minor, then take the common exit.
266  info = info + j - ja
267  GO TO 30
268  END IF
269 *
270  IF( j-ja+jb+1.LE.n ) THEN
271 *
272 * Form the row panel of U using the triangular solver
273 *
274  CALL pctrsm( 'Left', uplo, 'Conjugate transpose',
275  $ 'Non-Unit', jb, n-j-jb+ja, cone, a, i, j,
276  $ desca, a, i, j+jb, desca )
277 *
278 * Update the trailing matrix, A = A - U'*U
279 *
280  CALL pcherk( uplo, 'Conjugate transpose', n-j-jb+ja, jb,
281  $ -one, a, i, j+jb, desca, one, a, i+jb,
282  $ j+jb, desca )
283  END IF
284  10 CONTINUE
285 *
286  ELSE
287 *
288 * 1-tree topology for the communication along process columns,
289 * Split-ring topology along process rows.
290 *
291  CALL pb_topset( ictxt, 'Broadcast', 'Rowwise', 'S-ring' )
292  CALL pb_topset( ictxt, 'Broadcast', 'Columnwise', ' ' )
293 *
294 * A is lower triangular, compute Cholesky factorization A = L*L'
295 * (right-looking)
296 *
297 * Handle the first block of columns separately
298 *
*    As in the upper case: JN/JB describe the first (possibly
*    partial when N < NB_A) block of columns.
299  jn = min( iceil( ja, desca( nb_ ) )*desca( nb_ ), ja+n-1 )
300  jb = jn - ja + 1
301 *
302 * Perform unblocked Cholesky factorization on JB block
303 *
304  CALL pcpotf2( uplo, jb, a, ia, ja, desca, info )
305  IF( info.NE.0 )
306  $ GO TO 30
307 *
308  IF( jb+1.LE.n ) THEN
309 *
310 * Form the column panel of L using the triangular solver
311 *
312  CALL pctrsm( 'Right', uplo, 'Conjugate transpose',
313  $ 'Non-Unit', n-jb, jb, cone, a, ia, ja, desca,
314  $ a, ia+jb, ja, desca )
315 *
316 * Update the trailing matrix, A = A - L*L'
317 *
318  CALL pcherk( uplo, 'No Transpose', n-jb, jb, -one, a, ia+jb,
319  $ ja, desca, one, a, ia+jb, ja+jb, desca )
320 *
321  END IF
322 *
323  DO 20 j = jn+1, ja+n-1, desca( nb_ )
324  jb = min( n-j+ja, desca( nb_ ) )
325  i = ia + j - ja
326 *
327 * Perform unblocked Cholesky factorization on JB block
328 *
329  CALL pcpotf2( uplo, jb, a, i, j, desca, info )
330  IF( info.NE.0 ) THEN
*    Translate the block-local failure index into the order of
*    the global leading minor (see INFO > 0 in the header).
331  info = info + j - ja
332  GO TO 30
333  END IF
334 *
335  IF( j-ja+jb+1.LE.n ) THEN
336 *
337 * Form the column panel of L using the triangular solver
338 *
339  CALL pctrsm( 'Right', uplo, 'Conjugate transpose',
340  $ 'Non-Unit', n-j-jb+ja, jb, cone, a, i, j,
341  $ desca, a, i+jb, j, desca )
342 *
343 * Update the trailing matrix, A = A - L*L'
344 *
345  CALL pcherk( uplo, 'No Transpose', n-j-jb+ja, jb, -one,
346  $ a, i+jb, j, desca, one, a, i+jb, j+jb,
347  $ desca )
348 *
349  END IF
350  20 CONTINUE
351 *
352  END IF
353 *
* Common exit (normal completion or failed factorization):
* restore the broadcast topologies saved on entry.
*
354  30 CONTINUE
355 *
356  CALL pb_topset( ictxt, 'Broadcast', 'Rowwise', rowbtop )
357  CALL pb_topset( ictxt, 'Broadcast', 'Columnwise', colbtop )
358 *
359  RETURN
360 *
361 * End of PCPOTRF
362 *
363  END
pcpotf2
subroutine pcpotf2(UPLO, N, A, IA, JA, DESCA, INFO)
Definition: pcpotf2.f:2
pcpotrf
subroutine pcpotrf(UPLO, N, A, IA, JA, DESCA, INFO)
Definition: pcpotrf.f:2
pchk1mat
subroutine pchk1mat(MA, MAPOS0, NA, NAPOS0, IA, JA, DESCA, DESCAPOS0, NEXTRA, EX, EXPOS, INFO)
Definition: pchkxmat.f:3
chk1mat
subroutine chk1mat(MA, MAPOS0, NA, NAPOS0, IA, JA, DESCA, DESCAPOS0, INFO)
Definition: chk1mat.f:3
pxerbla
subroutine pxerbla(ICTXT, SRNAME, INFO)
Definition: pxerbla.f:2
min
#define min(A, B)
Definition: pcgemr.c:181