      SUBROUTINE ZLQT02( M, N, K, A, AF, Q, L, LDA, TAU, WORK, LWORK,
     $                   RWORK, RESULT )
*
*     .. Scalar Arguments ..
      INTEGER            k, lda, lwork, m, n
*     ..
*     .. Array Arguments ..
      DOUBLE PRECISION   result( * ), rwork( * )
      COMPLEX*16         a( lda, * ), af( lda, * ), l( lda, * ),
     $                   q( lda, * ), tau( * ), work( lwork )
*     ..
*     .. Parameters ..
      DOUBLE PRECISION   zero, one
      parameter( zero = 0.0d+0, one = 1.0d+0 )
      COMPLEX*16         rogue
      parameter( rogue = ( -1.0d+10, -1.0d+10 ) )
*     ..
*     .. Local Scalars ..
      INTEGER            info
      DOUBLE PRECISION   anorm, eps, resid
*     ..
*     .. External Functions ..
      DOUBLE PRECISION   dlamch, zlange, zlansy
      EXTERNAL           dlamch, zlange, zlansy
*     ..
*     .. External Subroutines ..
      EXTERNAL           zgemm, zherk, zlacpy, zlaset, zunglq
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          dble, dcmplx, max
*     ..
*     .. Scalars in Common ..
      CHARACTER*32       srnamt
*     ..
*     .. Common blocks ..
      common / srnamc / srnamt
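*     ..
*     .. Executable Statements ..
*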
      eps = dlamch( 'Epsilon' )
*
*     Copy the first k rows of the factorization to the array Q
*
      CALL zlaset( 'Full', m, n, rogue, rogue, q, lda )
      CALL zlacpy( 'Upper', k, n-1, af( 1, 2 ), lda, q( 1, 2 ), lda )
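*
*     Generate the first n columns of the matrix Q
*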
      srnamt = 'ZUNGLQ'
      CALL zunglq( m, n, k, q, lda, tau, work, lwork, info )
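*
*     Copy L(1:k,1:m)
*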
      CALL zlaset( 'Full', k, m, dcmplx( zero ), dcmplx( zero ), l,
     $             lda )
      CALL zlacpy( 'Lower', k, m, af, lda, l, lda )
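*
*     Compute L(1:k,1:m) - A(1:k,1:n) * Q(1:m,1:n)'
*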
      CALL zgemm( 'No transpose', 'Conjugate transpose', k, m, n,
     $            dcmplx( -one ), a, lda, q, lda, dcmplx( one ), l,
     $            lda )
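*
*     Compute norm( L - A*Q' ) / ( N * norm(A) * EPS ) .
*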
      anorm = zlange( '1', k, n, a, lda, rwork )
      resid = zlange( '1', k, m, l, lda, rwork )
      IF( anorm.GT.zero ) THEN
         result( 1 ) = ( ( resid / dble( max( 1, n ) ) ) / anorm ) / eps
      ELSE
         result( 1 ) = zero
      END IF
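*
*     Compute I - Q*Q'
*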
      CALL zlaset( 'Full', m, m, dcmplx( zero ), dcmplx( one ), l, lda )
      CALL zherk( 'Upper', 'No transpose', m, n, -one, q, lda, one, l,
     $            lda )
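*
*     Compute norm( I - Q*Q' ) / ( N * EPS ) .
*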
      resid = zlansy( '1', 'Upper', m, l, lda, rwork )
*
      result( 2 ) = ( resid / dble( max( 1, n ) ) ) / eps
*
      RETURN
*
*     End of ZLQT02
*
      END