      SUBROUTINE dlasda( ICOMPQ, SMLSIZ, N, SQRE, D, E, U, LDU, VT, K,
     $                   DIFL, DIFR, Z, POLES, GIVPTR, GIVCOL, LDGCOL,
     $                   PERM, GIVNUM, C, S, WORK, IWORK, INFO )
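*
*     DLASDA uses a divide and conquer approach to compute the
*     singular value decomposition (SVD) of a real upper bidiagonal
*     N-by-M matrix with diagonal D and offdiagonal E, where
*     M = N + SQRE.  If ICOMPQ = 0, only the singular values are
*     computed; if ICOMPQ = 1, the singular vectors are also computed,
*     in the compact form used by DBDSDC.
*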
*     .. Scalar Arguments ..
      INTEGER            ICOMPQ, INFO, LDGCOL, LDU, N, SMLSIZ, SQRE
*     ..
*     .. Array Arguments ..
      INTEGER            GIVCOL( LDGCOL, * ), GIVPTR( * ), IWORK( * ),
     $                   K( * ), PERM( LDGCOL, * )
      DOUBLE PRECISION   C( * ), D( * ), DIFL( LDU, * ),
     $                   DIFR( LDU, * ), E( * ), GIVNUM( LDU, * ),
     $                   POLES( LDU, * ), S( * ), U( LDU, * ),
     $                   VT( LDU, * ), WORK( * ), Z( LDU, * )
*     ..
*     .. Parameters ..
      DOUBLE PRECISION   ZERO, ONE
      PARAMETER          ( ZERO = 0.0D+0, ONE = 1.0D+0 )
*     ..
*     .. Local Scalars ..
      INTEGER            I, I1, IC, IDXQ, IDXQI, IM1, INODE, ITEMP,
     $                   IWK, J, LF, LL, LVL, LVL2, M, NCC, ND, NDB1,
     $                   NDIML, NDIMR, NL, NLF, NLP1, NLVL, NR, NRF,
     $                   NRP1, NRU, NWORK1, NWORK2, SMLSZP, SQREI, VF,
     $                   VFI, VL, VLI
      DOUBLE PRECISION   ALPHA, BETA
*     ..
*     .. External Subroutines ..
      EXTERNAL           DCOPY, DLASD6, DLASDQ, DLASDT, DLASET, XERBLA
*     ..
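*     .. Executable Statements ..
*
*     Test the input parameters.
*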
      info = 0
*
      IF( ( icompq.LT.0 ) .OR. ( icompq.GT.1 ) ) THEN
         info = -1
      ELSE IF( smlsiz.LT.3 ) THEN
         info = -2
      ELSE IF( n.LT.0 ) THEN
         info = -3
      ELSE IF( ( sqre.LT.0 ) .OR. ( sqre.GT.1 ) ) THEN
         info = -4
      ELSE IF( ldu.LT.( n+sqre ) ) THEN
         info = -8
      ELSE IF( ldgcol.LT.n ) THEN
         info = -17
      END IF
      IF( info.NE.0 ) THEN
         CALL xerbla( 'DLASDA', -info )
         RETURN
      END IF
*
      m = n + sqre
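*
*     If the input matrix is too small, call DLASDQ to find the SVD.
*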
      IF( n.LE.smlsiz ) THEN
         IF( icompq.EQ.0 ) THEN
            CALL dlasdq( 'U', sqre, n, 0, 0, 0, d, e, vt, ldu, u, ldu,
     $                   u, ldu, work, info )
         ELSE
            CALL dlasdq( 'U', sqre, n, m, n, 0, d, e, vt, ldu, u, ldu,
     $                   u, ldu, work, info )
         END IF
         RETURN
      END IF
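*
*     Book-keeping and set up the computation tree.
*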
      inode = 1
      ndiml = inode + n
      ndimr = ndiml + n
      idxq = ndimr + n
      iwk = idxq + n
*
      ncc = 0
      nru = 0
*
      smlszp = smlsiz + 1
      vf = 1
      vl = vf + m
      nwork1 = vl + m
      nwork2 = nwork1 + smlszp*smlszp
*
      CALL dlasdt( n, nlvl, nd, iwork( inode ), iwork( ndiml ),
     $             iwork( ndimr ), smlsiz )
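*
*     For the nodes on bottom level of the tree, solve
*     their subproblems by DLASDQ.
*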
      ndb1 = ( nd+1 ) / 2
      DO 30 i = ndb1, nd
         i1 = i - 1
         ic = iwork( inode+i1 )
         nl = iwork( ndiml+i1 )
         nlp1 = nl + 1
         nr = iwork( ndimr+i1 )
         nlf = ic - nl
         nrf = ic + 1
         idxqi = idxq + nlf - 2
         vfi = vf + nlf - 1
         vli = vl + nlf - 1
         sqrei = 1
         IF( icompq.EQ.0 ) THEN
            CALL dlaset( 'A', nlp1, nlp1, zero, one, work( nwork1 ),
     $                   smlszp )
            CALL dlasdq( 'U', sqrei, nl, nlp1, nru, ncc, d( nlf ),
     $                   e( nlf ), work( nwork1 ), smlszp,
     $                   work( nwork2 ), nl, work( nwork2 ), nl,
     $                   work( nwork2 ), info )
            itemp = nwork1 + nl*smlszp
            CALL dcopy( nlp1, work( nwork1 ), 1, work( vfi ), 1 )
            CALL dcopy( nlp1, work( itemp ), 1, work( vli ), 1 )
         ELSE
            CALL dlaset( 'A', nl, nl, zero, one, u( nlf, 1 ), ldu )
            CALL dlaset( 'A', nlp1, nlp1, zero, one, vt( nlf, 1 ),
     $                   ldu )
            CALL dlasdq( 'U', sqrei, nl, nlp1, nl, ncc, d( nlf ),
     $                   e( nlf ), vt( nlf, 1 ), ldu, u( nlf, 1 ), ldu,
     $                   u( nlf, 1 ), ldu, work( nwork1 ), info )
            CALL dcopy( nlp1, vt( nlf, 1 ), 1, work( vfi ), 1 )
            CALL dcopy( nlp1, vt( nlf, nlp1 ), 1, work( vli ), 1 )
         END IF
         IF( info.NE.0 ) THEN
            RETURN
         END IF
         DO 10 j = 1, nl
            iwork( idxqi+j ) = j
   10    CONTINUE
         IF( ( i.EQ.nd ) .AND. ( sqre.EQ.0 ) ) THEN
            sqrei = 0
         ELSE
            sqrei = 1
         END IF
         idxqi = idxqi + nlp1
         vfi = vfi + nlp1
         vli = vli + nlp1
         nrp1 = nr + sqrei
         IF( icompq.EQ.0 ) THEN
            CALL dlaset( 'A', nrp1, nrp1, zero, one, work( nwork1 ),
     $                   smlszp )
            CALL dlasdq( 'U', sqrei, nr, nrp1, nru, ncc, d( nrf ),
     $                   e( nrf ), work( nwork1 ), smlszp,
     $                   work( nwork2 ), nr, work( nwork2 ), nr,
     $                   work( nwork2 ), info )
            itemp = nwork1 + ( nrp1-1 )*smlszp
            CALL dcopy( nrp1, work( nwork1 ), 1, work( vfi ), 1 )
            CALL dcopy( nrp1, work( itemp ), 1, work( vli ), 1 )
         ELSE
            CALL dlaset( 'A', nr, nr, zero, one, u( nrf, 1 ), ldu )
            CALL dlaset( 'A', nrp1, nrp1, zero, one, vt( nrf, 1 ),
     $                   ldu )
            CALL dlasdq( 'U', sqrei, nr, nrp1, nr, ncc, d( nrf ),
     $                   e( nrf ), vt( nrf, 1 ), ldu, u( nrf, 1 ), ldu,
     $                   u( nrf, 1 ), ldu, work( nwork1 ), info )
            CALL dcopy( nrp1, vt( nrf, 1 ), 1, work( vfi ), 1 )
            CALL dcopy( nrp1, vt( nrf, nrp1 ), 1, work( vli ), 1 )
         END IF
         IF( info.NE.0 ) THEN
            RETURN
         END IF
         DO 20 j = 1, nr
            iwork( idxqi+j ) = j
   20    CONTINUE
   30 CONTINUE
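*
*     Now conquer each subproblem bottom-up.
*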
      j = 2**nlvl
      DO 50 lvl = nlvl, 1, -1
         lvl2 = lvl*2 - 1
*
*        Find the first node LF and last node LL on
*        the current level LVL.
*
         IF( lvl.EQ.1 ) THEN
            lf = 1
            ll = 1
         ELSE
            lf = 2**( lvl-1 )
            ll = 2*lf - 1
         END IF
         DO 40 i = lf, ll
            im1 = i - 1
            ic = iwork( inode+im1 )
            nl = iwork( ndiml+im1 )
            nr = iwork( ndimr+im1 )
            nlf = ic - nl
            nrf = ic + 1
            IF( i.EQ.ll ) THEN
               sqrei = sqre
            ELSE
               sqrei = 1
            END IF
            vfi = vf + nlf - 1
            vli = vl + nlf - 1
            idxqi = idxq + nlf - 1
            alpha = d( ic )
            beta = e( ic )
            IF( icompq.EQ.0 ) THEN
               CALL dlasd6( icompq, nl, nr, sqrei, d( nlf ),
     $                      work( vfi ), work( vli ), alpha, beta,
     $                      iwork( idxqi ), perm, givptr( 1 ), givcol,
     $                      ldgcol, givnum, ldu, poles, difl, difr, z,
     $                      k( 1 ), c( 1 ), s( 1 ), work( nwork1 ),
     $                      iwork( iwk ), info )
            ELSE
               j = j - 1
               CALL dlasd6( icompq, nl, nr, sqrei, d( nlf ),
     $                      work( vfi ), work( vli ), alpha, beta,
     $                      iwork( idxqi ), perm( nlf, lvl ),
     $                      givptr( j ), givcol( nlf, lvl2 ), ldgcol,
     $                      givnum( nlf, lvl2 ), ldu,
     $                      poles( nlf, lvl2 ), difl( nlf, lvl ),
     $                      difr( nlf, lvl2 ), z( nlf, lvl ), k( j ),
     $                      c( j ), s( j ), work( nwork1 ),
     $                      iwork( iwk ), info )