      SUBROUTINE ZLA_PORFSX_EXTENDED( PREC_TYPE, UPLO, N, NRHS, A, LDA,
     $                                AF, LDAF, COLEQU, C, B, LDB, Y,
     $                                LDY, BERR_OUT, N_NORMS,
     $                                ERR_BNDS_NORM, ERR_BNDS_COMP, RES,
     $                                AYB, DY, Y_TAIL, RCOND, ITHRESH,
     $                                RTHRESH, DZ_UB, IGNORE_CWISE,
     $                                INFO )
      INTEGER            INFO, LDA, LDAF, LDB, LDY, N, NRHS, PREC_TYPE,
     $                   N_NORMS, ITHRESH
      CHARACTER          UPLO
      LOGICAL            COLEQU, IGNORE_CWISE
      DOUBLE PRECISION   RTHRESH, DZ_UB
      COMPLEX*16         A( LDA, * ), AF( LDAF, * ), B( LDB, * ),
     $                   Y( LDY, * ), RES( * ), DY( * ), Y_TAIL( * )
      DOUBLE PRECISION   C( * ), AYB( * ), RCOND, BERR_OUT( * ),
     $                   ERR_BNDS_NORM( NRHS, * ),
     $                   ERR_BNDS_COMP( NRHS, * )
      INTEGER            UPLO2, CNT, I, J, X_STATE, Z_STATE,
     $                   Y_PREC_STATE
      DOUBLE PRECISION   YK, DYK, YMIN, NORMY, NORMX, NORMDX, DXRAT,
     $                   DZRAT, PREVNORMDX, PREV_DZ_Z, DXRATMAX,
     $                   DZRATMAX, DX_X, DZ_Z, FINAL_DX_X, FINAL_DZ_Z,
     $                   EPS, HUGEVAL, INCR_THRESH
      LOGICAL            INCR_PREC
      COMPLEX*16         ZDUM
      INTEGER            UNSTABLE_STATE, WORKING_STATE, CONV_STATE,
     $                   NOPROG_STATE, BASE_RESIDUAL, EXTRA_RESIDUAL,
     $                   EXTRA_Y
      PARAMETER          ( UNSTABLE_STATE = 0, WORKING_STATE = 1,
     $                   CONV_STATE = 2, NOPROG_STATE = 3 )
      PARAMETER          ( BASE_RESIDUAL = 0, EXTRA_RESIDUAL = 1,
     $                   EXTRA_Y = 2 )
      INTEGER            FINAL_NRM_ERR_I, FINAL_CMP_ERR_I, BERR_I
      INTEGER            RCOND_I, NRM_RCOND_I, NRM_ERR_I, CMP_RCOND_I
      INTEGER            CMP_ERR_I, PIV_GROWTH_I
      PARAMETER          ( FINAL_NRM_ERR_I = 1, FINAL_CMP_ERR_I = 2,
     $                   BERR_I = 3 )
      PARAMETER          ( RCOND_I = 4, NRM_RCOND_I = 5, NRM_ERR_I = 6 )
      PARAMETER          ( CMP_RCOND_I = 7, CMP_ERR_I = 8,
     $                   PIV_GROWTH_I = 9 )
      INTEGER            LA_LINRX_ITREF_I, LA_LINRX_ITHRESH_I,
     $                   LA_LINRX_CWISE_I
      PARAMETER          ( LA_LINRX_ITREF_I = 1,
     $                   LA_LINRX_ITHRESH_I = 2 )
      PARAMETER          ( LA_LINRX_CWISE_I = 3 )
      INTEGER            LA_LINRX_TRUST_I, LA_LINRX_ERR_I,
     $                   LA_LINRX_RCOND_I
      PARAMETER          ( LA_LINRX_TRUST_I = 1, LA_LINRX_ERR_I = 2 )
      PARAMETER          ( LA_LINRX_RCOND_I = 3 )
      LOGICAL            LSAME
      INTEGER            ILAUPLO
      DOUBLE PRECISION   DLAMCH
      EXTERNAL           LSAME, ILAUPLO, DLAMCH
      INTRINSIC          ABS, DBLE, DIMAG, MAX, MIN
      DOUBLE PRECISION   CABS1
      CABS1( ZDUM ) = ABS( DBLE( ZDUM ) ) + ABS( DIMAG( ZDUM ) )
      IF ( INFO.NE.0 ) RETURN
      EPS = DLAMCH( 'Epsilon' )
      HUGEVAL = DLAMCH( 'Overflow' )
*     Force HUGEVAL to Inf.
      HUGEVAL = HUGEVAL * HUGEVAL
      INCR_THRESH = DBLE( N ) * EPS
      IF ( LSAME( UPLO, 'L' ) ) THEN
         UPLO2 = ILAUPLO( 'L' )
      ELSE
         UPLO2 = ILAUPLO( 'U' )
      END IF
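*
*     Main loop: refine each of the NRHS right-hand sides in turn.
*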
      DO J = 1, NRHS
         Y_PREC_STATE = EXTRA_RESIDUAL
         IF ( Y_PREC_STATE .EQ. EXTRA_Y ) THEN
            DO I = 1, N
               Y_TAIL( I ) = 0.0D+0
            END DO
         END IF
         DXRAT = 0.0D+0
         DXRATMAX = 0.0D+0
         DZRAT = 0.0D+0
         DZRATMAX = 0.0D+0
         FINAL_DX_X = HUGEVAL
         FINAL_DZ_Z = HUGEVAL
         PREVNORMDX = HUGEVAL
         PREV_DZ_Z = HUGEVAL
         DZ_Z = HUGEVAL
         DX_X = HUGEVAL

         X_STATE = WORKING_STATE
         Z_STATE = UNSTABLE_STATE
         INCR_PREC = .FALSE.

         DO CNT = 1, ITHRESH
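*
*           Compute residual RES = B_s - op(A_s) * Y,
*           op(A) = A, A**T, or A**H depending on TRANS (and type).
*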
            CALL ZCOPY( N, B( 1, J ), 1, RES, 1 )
            IF ( Y_PREC_STATE .EQ. BASE_RESIDUAL ) THEN
               CALL ZHEMV( UPLO, N, DCMPLX(-1.0D+0), A, LDA, Y(1,J), 1,
     $              DCMPLX(1.0D+0), RES, 1 )
            ELSE IF ( Y_PREC_STATE .EQ. EXTRA_RESIDUAL ) THEN
               CALL BLAS_ZHEMV_X( UPLO2, N, DCMPLX(-1.0D+0), A, LDA,
     $              Y( 1, J ), 1, DCMPLX(1.0D+0), RES, 1, PREC_TYPE )
            ELSE
               CALL BLAS_ZHEMV2_X( UPLO2, N, DCMPLX(-1.0D+0), A, LDA,
     $              Y( 1, J ), Y_TAIL, 1, DCMPLX(1.0D+0), RES, 1,
     $              PREC_TYPE )
            END IF
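*
*           Solve op(A_s) * DY = RES using the Cholesky factorization
*           held in AF.
*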
            CALL ZCOPY( N, RES, 1, DY, 1 )
            CALL ZPOTRS( UPLO, N, 1, AF, LDAF, DY, N, INFO )
*
*           Calculate relative changes DX_X, DZ_Z and ratios DXRAT,
*           DZRAT.
*
            NORMX = 0.0D+0
            NORMY = 0.0D+0
            NORMDX = 0.0D+0
            DZ_Z = 0.0D+0
            YMIN = HUGEVAL

            DO I = 1, N
               YK = CABS1( Y( I, J ) )
               DYK = CABS1( DY( I ) )

               IF ( YK .NE. 0.0D+0 ) THEN
                  DZ_Z = MAX( DZ_Z, DYK / YK )
               ELSE IF ( DYK .NE. 0.0D+0 ) THEN
                  DZ_Z = HUGEVAL
               END IF

               YMIN = MIN( YMIN, YK )
               NORMY = MAX( NORMY, YK )

               IF ( COLEQU ) THEN
                  NORMX = MAX( NORMX, YK * C( I ) )
                  NORMDX = MAX( NORMDX, DYK * C( I ) )
               ELSE
                  NORMX = NORMY
                  NORMDX = MAX( NORMDX, DYK )
               END IF
            END DO
            IF ( NORMX .NE. 0.0D+0 ) THEN
               DX_X = NORMDX / NORMX
            ELSE IF ( NORMDX .EQ. 0.0D+0 ) THEN
               DX_X = 0.0D+0
            ELSE
               DX_X = HUGEVAL
            END IF

            DXRAT = NORMDX / PREVNORMDX
            DZRAT = DZ_Z / PREV_DZ_Z
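*
*           Check termination criteria.
*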
            IF ( YMIN*RCOND .LT. INCR_THRESH*NORMY
     $           .AND. Y_PREC_STATE .LT. EXTRA_Y )
     $           INCR_PREC = .TRUE.

            IF ( X_STATE .EQ. NOPROG_STATE .AND. DXRAT .LE. RTHRESH )
     $           X_STATE = WORKING_STATE
            IF ( X_STATE .EQ. WORKING_STATE ) THEN
               IF ( DX_X .LE. EPS ) THEN
                  X_STATE = CONV_STATE
               ELSE IF ( DXRAT .GT. RTHRESH ) THEN
                  IF ( Y_PREC_STATE .NE. EXTRA_Y ) THEN
                     INCR_PREC = .TRUE.
                  ELSE
                     X_STATE = NOPROG_STATE
                  END IF
               ELSE
                  IF ( DXRAT .GT. DXRATMAX ) DXRATMAX = DXRAT
               END IF
               IF ( X_STATE .GT. WORKING_STATE ) FINAL_DX_X = DX_X
            END IF
            IF ( Z_STATE .EQ. UNSTABLE_STATE .AND. DZ_Z .LE. DZ_UB )
     $           Z_STATE = WORKING_STATE
            IF ( Z_STATE .EQ. NOPROG_STATE .AND. DZRAT .LE. RTHRESH )
     $           Z_STATE = WORKING_STATE
            IF ( Z_STATE .EQ. WORKING_STATE ) THEN
               IF ( DZ_Z .LE. EPS ) THEN
                  Z_STATE = CONV_STATE
               ELSE IF ( DZ_Z .GT. DZ_UB ) THEN
                  Z_STATE = UNSTABLE_STATE
                  DZRATMAX = 0.0D+0
                  FINAL_DZ_Z = HUGEVAL
               ELSE IF ( DZRAT .GT. RTHRESH ) THEN
                  IF ( Y_PREC_STATE .NE. EXTRA_Y ) THEN
                     INCR_PREC = .TRUE.
                  ELSE
                     Z_STATE = NOPROG_STATE
                  END IF
               ELSE
                  IF ( DZRAT .GT. DZRATMAX ) DZRATMAX = DZRAT
               END IF
               IF ( Z_STATE .GT. WORKING_STATE ) FINAL_DZ_Z = DZ_Z
            END IF
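*
*           Exit the refinement loop once the normwise iteration has
*           stopped and the componentwise one has too (or is ignored).
*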
            IF ( X_STATE.NE.WORKING_STATE.AND.
     $           ( IGNORE_CWISE .OR. Z_STATE.NE.WORKING_STATE ) )
     $           GOTO 666

            IF ( INCR_PREC ) THEN
               INCR_PREC = .FALSE.
               Y_PREC_STATE = Y_PREC_STATE + 1
               DO I = 1, N
                  Y_TAIL( I ) = 0.0D+0
               END DO
            END IF

            PREVNORMDX = NORMDX
            PREV_DZ_Z = DZ_Z
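*
*           Update the solution.
*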
            IF ( Y_PREC_STATE .LT. EXTRA_Y ) THEN
               CALL ZAXPY( N, DCMPLX(1.0D+0), DY, 1, Y( 1, J ), 1 )
            ELSE
               CALL ZLA_WWADDW( N, Y( 1, J ), Y_TAIL, DY )
            END IF

         END DO
*        Target of "IF (Z_STOP .AND. X_STOP) GOTO 666" above.
 666     CONTINUE
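*
*     Set final_* when CNT hits ITHRESH.
*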
         IF ( X_STATE .EQ. WORKING_STATE ) FINAL_DX_X = DX_X
         IF ( Z_STATE .EQ. WORKING_STATE ) FINAL_DZ_Z = DZ_Z
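*
*     Compute error bounds.  Dividing by (1 - DXRATMAX) or
*     (1 - DZRATMAX) accounts for the geometric-series tail of the
*     corrections still to come at the observed convergence ratio.
*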
         IF ( N_NORMS .GE. 1 ) THEN
            ERR_BNDS_NORM( J, LA_LINRX_ERR_I ) =
     $           FINAL_DX_X / (1 - DXRATMAX)
         END IF
         IF ( N_NORMS .GE. 2 ) THEN
            ERR_BNDS_COMP( J, LA_LINRX_ERR_I ) =
     $           FINAL_DZ_Z / (1 - DZRATMAX)
         END IF
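*
*     Compute componentwise relative backward error from formula
*         max(i) ( abs(R(i)) / ( abs(op(A_s))*abs(Y) + abs(B_s) )(i) )
*     where abs(Z) is the componentwise absolute value of the matrix
*     or vector Z.
*
*        Compute residual RES = B_s - op(A_s) * Y,
*            op(A) = A, A**T, or A**H depending on TRANS (and type).
*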
         CALL ZCOPY( N, B( 1, J ), 1, RES, 1 )
         CALL ZHEMV( UPLO, N, DCMPLX(-1.0D+0), A, LDA, Y( 1, J ), 1,
     $        DCMPLX(1.0D+0), RES, 1 )

         DO I = 1, N
            AYB( I ) = CABS1( B( I, J ) )
         END DO
*
*        Compute abs(op(A_s))*abs(Y) + abs(B_s).
*
         CALL ZLA_HEAMV( UPLO2, N, 1.0D+0,
     $        A, LDA, Y( 1, J ), 1, 1.0D+0, AYB, 1 )

         CALL ZLA_LIN_BERR( N, N, 1, RES, AYB, BERR_OUT( J ) )
*
*     End of loop for each RHS.
*
      END DO
*
      RETURN
      END
References:

double precision function dlamch(CMACH)
    DLAMCH determines double precision machine parameters.
subroutine zaxpy(N, ZA, ZX, INCX, ZY, INCY)
    ZAXPY computes a constant times a vector plus a vector.
subroutine zcopy(N, ZX, INCX, ZY, INCY)
    ZCOPY copies a vector to another vector.
subroutine zhemv(UPLO, N, ALPHA, A, LDA, X, INCX, BETA, Y, INCY)
    ZHEMV performs the Hermitian matrix-vector operation
    y := alpha*A*x + beta*y.
subroutine zla_heamv(UPLO, N, ALPHA, A, LDA, X, INCX, BETA, Y, INCY)
    ZLA_HEAMV computes a matrix-vector product using a Hermitian
    indefinite matrix to calculate error bounds.
subroutine zla_wwaddw(N, X, Y, W)
    ZLA_WWADDW adds a vector into a doubled-single vector.
subroutine zla_lin_berr(N, NZ, NRHS, RES, AYB, BERR)
    ZLA_LIN_BERR computes a component-wise relative backward error.
subroutine zpotrs(UPLO, N, NRHS, A, LDA, B, LDB, INFO)
    ZPOTRS solves a system of linear equations with a Hermitian
    positive definite matrix using the Cholesky factorization
    computed by ZPOTRF.
subroutine zla_porfsx_extended(PREC_TYPE, UPLO, N, NRHS, A, LDA, AF, LDAF, COLEQU, C, B, LDB, Y, LDY, BERR_OUT, N_NORMS, ERR_BNDS_NORM, ERR_BNDS_COMP, RES, AYB, DY, Y_TAIL, RCOND, ITHRESH, RTHRESH, DZ_UB, IGNORE_CWISE, INFO)
    ZLA_PORFSX_EXTENDED improves the computed solution to a system of
    linear equations for symmetric or Hermitian positive-definite
    matrices by performing extra-precise iterative refinement and
    provides error bounds and backward error estimates for the
    solution.
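For orientation, the sketch below shows one way the routine could be
driven directly; in LAPACK it is normally reached only through the
ZPORFSX expert driver. The ITHRESH, RTHRESH, and DZ_UB values
(10, 0.5D+0, 0.25D+0) mirror the defaults documented for that driver,
while the 2-by-2 test system and the RCOND placeholder are assumptions
made purely for illustration, not values a real driver would use.

      PROGRAM PORFSX_EXT_DEMO
*     Hypothetical demonstration driver; not part of LAPACK.
      INTEGER            N, NRHS
      PARAMETER          ( N = 2, NRHS = 1 )
      INTEGER            INFO, PREC_TYPE
      INTEGER            ILAPREC
      EXTERNAL           ILAPREC
      DOUBLE PRECISION   RCOND
      COMPLEX*16         A( N, N ), AF( N, N ), B( N, NRHS ),
     $                   Y( N, NRHS ), RES( N ), DY( N ), Y_TAIL( N )
      DOUBLE PRECISION   C( N ), AYB( N ), BERR_OUT( NRHS ),
     $                   ERR_BNDS_NORM( NRHS, 3 ),
     $                   ERR_BNDS_COMP( NRHS, 3 )
*
*     A small Hermitian positive definite system (assumed test data).
      A( 1, 1 ) = ( 4.0D+0, 0.0D+0 )
      A( 1, 2 ) = ( 1.0D+0, -1.0D+0 )
      A( 2, 1 ) = ( 1.0D+0, 1.0D+0 )
      A( 2, 2 ) = ( 3.0D+0, 0.0D+0 )
      B( 1, 1 ) = ( 1.0D+0, 0.0D+0 )
      B( 2, 1 ) = ( 2.0D+0, 0.0D+0 )
*
*     Cholesky-factor A into AF, then compute an initial solution Y.
      CALL ZLACPY( 'All', N, N, A, N, AF, N )
      CALL ZPOTRF( 'U', N, AF, N, INFO )
      CALL ZLACPY( 'All', N, NRHS, B, N, Y, N )
      CALL ZPOTRS( 'U', N, NRHS, AF, N, Y, N, INFO )
*
*     Refine.  COLEQU = .FALSE. means C is not referenced.  RCOND
*     would normally come from ZPOCON; 1.0 is a placeholder here.
      RCOND = 1.0D+0
      PREC_TYPE = ILAPREC( 'E' )
      CALL ZLA_PORFSX_EXTENDED( PREC_TYPE, 'U', N, NRHS, A, N, AF, N,
     $     .FALSE., C, B, N, Y, N, BERR_OUT, 2, ERR_BNDS_NORM,
     $     ERR_BNDS_COMP, RES, AYB, DY, Y_TAIL, RCOND, 10, 0.5D+0,
     $     0.25D+0, .FALSE., INFO )
      WRITE( *, * ) 'Componentwise backward error: ', BERR_OUT( 1 )
      END

Passing ILAPREC( 'E' ) requests the extra-precise residual path
(BLAS_ZHEMV_X / BLAS_ZHEMV2_X above); ILAPREC( 'D' ) would keep the
residual in working precision.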