      SUBROUTINE DLA_PORFSX_EXTENDED( PREC_TYPE, UPLO, N, NRHS, A, LDA,
     $                                AF, LDAF, COLEQU, C, B, LDB, Y,
     $                                LDY, BERR_OUT, N_NORMS,
     $                                ERR_BNDS_NORM, ERR_BNDS_COMP, RES,
     $                                AYB, DY, Y_TAIL, RCOND, ITHRESH,
     $                                RTHRESH, DZ_UB, IGNORE_CWISE,
     $                                INFO )
*     .. Scalar Arguments ..
      CHARACTER          UPLO
      INTEGER            INFO, LDA, LDAF, LDB, LDY, N, NRHS, PREC_TYPE,
     $                   N_NORMS, ITHRESH
      LOGICAL            COLEQU, IGNORE_CWISE
      DOUBLE PRECISION   RTHRESH, DZ_UB
*     .. Array Arguments ..
      DOUBLE PRECISION   A( LDA, * ), AF( LDAF, * ), B( LDB, * ),
     $                   Y( LDY, * ), RES( * ), DY( * ), Y_TAIL( * )
      DOUBLE PRECISION   C( * ), AYB( * ), RCOND, BERR_OUT( * ),
     $                   ERR_BNDS_NORM( NRHS, * ),
     $                   ERR_BNDS_COMP( NRHS, * )
*     .. Local Scalars ..
      INTEGER            UPLO2, CNT, I, J, X_STATE, Z_STATE
      DOUBLE PRECISION   YK, DYK, YMIN, NORMY, NORMX, NORMDX, DXRAT,
     $                   DZRAT, PREVNORMDX, PREV_DZ_Z, DXRATMAX,
     $                   DZRATMAX, DX_X, DZ_Z, FINAL_DX_X, FINAL_DZ_Z,
     $                   EPS, HUGEVAL, INCR_THRESH
      LOGICAL            INCR_PREC
*     .. Parameters ..
*     States of the normwise (X) and componentwise (Z) refinement loops
*     and of the internal precision used for Y and the residual.
      INTEGER            UNSTABLE_STATE, WORKING_STATE, CONV_STATE,
     $                   NOPROG_STATE, Y_PREC_STATE, BASE_RESIDUAL,
     $                   EXTRA_RESIDUAL, EXTRA_Y
      PARAMETER        ( UNSTABLE_STATE = 0, WORKING_STATE = 1,
     $                   CONV_STATE = 2, NOPROG_STATE = 3 )
      PARAMETER        ( BASE_RESIDUAL = 0, EXTRA_RESIDUAL = 1,
     $                   EXTRA_Y = 2 )
*     Column indices into ERR_BNDS_NORM / ERR_BNDS_COMP.
      INTEGER            FINAL_NRM_ERR_I, FINAL_CMP_ERR_I, BERR_I
      INTEGER            RCOND_I, NRM_RCOND_I, NRM_ERR_I, CMP_RCOND_I
      INTEGER            CMP_ERR_I, PIV_GROWTH_I
      PARAMETER        ( FINAL_NRM_ERR_I = 1, FINAL_CMP_ERR_I = 2,
     $                   BERR_I = 3 )
      PARAMETER        ( RCOND_I = 4, NRM_RCOND_I = 5, NRM_ERR_I = 6 )
      PARAMETER        ( CMP_RCOND_I = 7, CMP_ERR_I = 8,
     $                   PIV_GROWTH_I = 9 )
      INTEGER            LA_LINRX_ITREF_I, LA_LINRX_ITHRESH_I,
     $                   LA_LINRX_CWISE_I
      PARAMETER        ( LA_LINRX_ITREF_I = 1,
     $                   LA_LINRX_ITHRESH_I = 2 )
      PARAMETER        ( LA_LINRX_CWISE_I = 3 )
      INTEGER            LA_LINRX_TRUST_I, LA_LINRX_ERR_I,
     $                   LA_LINRX_RCOND_I
      PARAMETER        ( LA_LINRX_TRUST_I = 1, LA_LINRX_ERR_I = 2 )
      PARAMETER        ( LA_LINRX_RCOND_I = 3 )
*     .. External Functions ..
      LOGICAL            LSAME
      INTEGER            ILAUPLO
      DOUBLE PRECISION   DLAMCH
*     .. Intrinsic Functions ..
      INTRINSIC          ABS, MAX, MIN
*     .. Executable Statements ..
      IF ( INFO.NE.0 ) RETURN
      EPS = DLAMCH( 'Epsilon' )
      HUGEVAL = DLAMCH( 'Overflow' )
*     Force HUGEVAL to Inf
      HUGEVAL = HUGEVAL * HUGEVAL
*     Using HUGEVAL may lead to spurious underflows.
      INCR_THRESH = DBLE( N ) * EPS
*
      IF ( LSAME( UPLO, 'L' ) ) THEN
         UPLO2 = ILAUPLO( 'L' )
      ELSE
         UPLO2 = ILAUPLO( 'U' )
      END IF
*
*     For each right-hand side J = 1, ..., NRHS (loop statement elided):
*
         Y_PREC_STATE = EXTRA_RESIDUAL
         IF ( Y_PREC_STATE .EQ. EXTRA_Y ) THEN
*           The doubled-single tail Y_TAIL is zeroed here (elided).
         END IF
*
         X_STATE = WORKING_STATE
         Z_STATE = UNSTABLE_STATE
*
*        Refinement iteration (loop statement elided): form the
*        residual RES = B - A*Y at the precision selected by
*        Y_PREC_STATE.
*
            CALL DCOPY( N, B( 1, J ), 1, RES, 1 )
            IF ( Y_PREC_STATE .EQ. BASE_RESIDUAL ) THEN
               CALL DSYMV( UPLO, N, -1.0D+0, A, LDA, Y( 1, J ), 1,
     $              1.0D+0, RES, 1 )
            ELSE IF ( Y_PREC_STATE .EQ. EXTRA_RESIDUAL ) THEN
               CALL BLAS_DSYMV_X( UPLO2, N, -1.0D+0, A, LDA,
     $              Y( 1, J ), 1, 1.0D+0, RES, 1, PREC_TYPE )
            ELSE
               CALL BLAS_DSYMV2_X( UPLO2, N, -1.0D+0, A, LDA,
     $              Y( 1, J ), Y_TAIL, 1, 1.0D+0, RES, 1, PREC_TYPE )
            END IF
*
*           Solve A*DY = RES using the Cholesky factor in AF.
            CALL DCOPY( N, RES, 1, DY, 1 )
            CALL DPOTRS( UPLO, N, 1, AF, LDAF, DY, N, INFO )
*
*           For each component I = 1, ..., N (loop statement elided),
*           measure the correction relative to the current solution,
*           componentwise (DZ_Z) and normwise (NORMDX vs. NORMX).
               YK = ABS( Y( I, J ) )
               DYK = ABS( DY( I ) )
               IF ( YK .NE. 0.0D+0 ) THEN
                  DZ_Z = MAX( DZ_Z, DYK / YK )
               ELSE IF ( DYK .NE. 0.0D+0 ) THEN
                  DZ_Z = HUGEVAL
               END IF
*
               YMIN = MIN( YMIN, YK )
               NORMY = MAX( NORMY, YK )
*
               IF ( COLEQU ) THEN
                  NORMX = MAX( NORMX, YK * C( I ) )
                  NORMDX = MAX( NORMDX, DYK * C( I ) )
               ELSE
                  NORMX = NORMY
                  NORMDX = MAX( NORMDX, DYK )
               END IF
*
*           Relative normwise change in this iteration.
            IF ( NORMX .NE. 0.0D+0 ) THEN
               DX_X = NORMDX / NORMX
            ELSE IF ( NORMDX .EQ. 0.0D+0 ) THEN
               DX_X = 0.0D+0
            ELSE
               DX_X = HUGEVAL
            END IF
*
            DXRAT = NORMDX / PREVNORMDX
            DZRAT = DZ_Z / PREV_DZ_Z
*
*           Decide whether extra internal precision is needed for Y.
            IF ( YMIN*RCOND .LT. INCR_THRESH*NORMY
     $           .AND. Y_PREC_STATE .LT. EXTRA_Y )
     $           INCR_PREC = .TRUE.
*
*           Normwise (X) convergence state machine.
            IF ( X_STATE .EQ. NOPROG_STATE .AND. DXRAT .LE. RTHRESH )
     $           X_STATE = WORKING_STATE
            IF ( X_STATE .EQ. WORKING_STATE ) THEN
               IF ( DX_X .LE. EPS ) THEN
                  X_STATE = CONV_STATE
               ELSE IF ( DXRAT .GT. RTHRESH ) THEN
                  IF ( Y_PREC_STATE .NE. EXTRA_Y ) THEN
                     INCR_PREC = .TRUE.
                  ELSE
                     X_STATE = NOPROG_STATE
                  END IF
               ELSE
                  IF ( DXRAT .GT. DXRATMAX ) DXRATMAX = DXRAT
               END IF
               IF ( X_STATE .GT. WORKING_STATE ) FINAL_DX_X = DX_X
            END IF
*
*           Componentwise (Z) convergence state machine.
            IF ( Z_STATE .EQ. UNSTABLE_STATE .AND. DZ_Z .LE. DZ_UB )
     $           Z_STATE = WORKING_STATE
            IF ( Z_STATE .EQ. NOPROG_STATE .AND. DZRAT .LE. RTHRESH )
     $           Z_STATE = WORKING_STATE
            IF ( Z_STATE .EQ. WORKING_STATE ) THEN
               IF ( DZ_Z .LE. EPS ) THEN
                  Z_STATE = CONV_STATE
               ELSE IF ( DZ_Z .GT. DZ_UB ) THEN
                  Z_STATE = UNSTABLE_STATE
                  DZRATMAX = 0.0D+0
                  FINAL_DZ_Z = HUGEVAL
               ELSE IF ( DZRAT .GT. RTHRESH ) THEN
                  IF ( Y_PREC_STATE .NE. EXTRA_Y ) THEN
                     INCR_PREC = .TRUE.
                  ELSE
                     Z_STATE = NOPROG_STATE
                  END IF
               ELSE
                  IF ( DZRAT .GT. DZRATMAX ) DZRATMAX = DZRAT
               END IF
               IF ( Z_STATE .GT. WORKING_STATE ) FINAL_DZ_Z = DZ_Z
            END IF
*           Stop refining when neither error measure is still improving.
            IF ( X_STATE.NE.WORKING_STATE .AND.
     $           ( IGNORE_CWISE .OR. Z_STATE.NE.WORKING_STATE ) )
     $           GOTO 666
            IF ( INCR_PREC ) THEN
               INCR_PREC = .FALSE.
               Y_PREC_STATE = Y_PREC_STATE + 1
            END IF
*           Apply the correction: DAXPY in working precision, or
*           DLA_WWADDW once Y carries a doubled-single tail.
            IF ( Y_PREC_STATE .LT. EXTRA_Y ) THEN
               CALL DAXPY( N, 1.0D+0, DY, 1, Y( 1, J ), 1 )
            ELSE
               CALL DLA_WWADDW( N, Y( 1, J ), Y_TAIL, DY )
            END IF
 666     CONTINUE
*        Record the final relative changes if the loop ended while
*        still in the working state.
         IF ( X_STATE .EQ. WORKING_STATE ) FINAL_DX_X = DX_X
         IF ( Z_STATE .EQ. WORKING_STATE ) FINAL_DZ_Z = DZ_Z
*        Error bounds: extrapolate the last change by the worst
*        observed per-iteration ratio.
         IF ( N_NORMS .GE. 1 ) THEN
            ERR_BNDS_NORM( J, LA_LINRX_ERR_I ) =
     $           FINAL_DX_X / (1 - DXRATMAX)
         END IF
         IF ( N_NORMS .GE. 2 ) THEN
            ERR_BNDS_COMP( J, LA_LINRX_ERR_I ) =
     $           FINAL_DZ_Z / (1 - DZRATMAX)
         END IF
*        Componentwise relative backward error:
*            max(i) abs(RES(i)) / ( abs(A)*abs(Y) + abs(B) )(i)
         CALL DCOPY( N, B( 1, J ), 1, RES, 1 )
         CALL DSYMV( UPLO, N, -1.0D+0, A, LDA, Y( 1, J ), 1, 1.0D+0,
     $        RES, 1 )
         DO I = 1, N
            AYB( I ) = ABS( B( I, J ) )
         END DO
         CALL DLA_SYAMV( UPLO2, N, 1.0D+0,
     $        A, LDA, Y( 1, J ), 1, 1.0D+0, AYB, 1 )
         CALL DLA_LIN_BERR( N, N, 1, RES, AYB, BERR_OUT( J ) )
subroutine dla_porfsx_extended(PREC_TYPE, UPLO, N, NRHS, A, LDA, AF, LDAF, COLEQU, C, B, LDB, Y, LDY, BERR_OUT, N_NORMS, ERR_BNDS_NORM, ERR_BNDS_COMP, RES, AYB, DY, Y_TAIL, RCOND, ITHRESH, RTHRESH, DZ_UB, IGNORE_CWISE, INFO)
DLA_PORFSX_EXTENDED improves the computed solution to a system of linear equations for symmetric or Hermitian positive-definite matrices by performing extra-precise iterative refinement and provides error bounds and backward error estimates for the solution. (The error-bound extrapolation it uses is restated in a short note after this list.)
subroutine dcopy(N, DX, INCX, DY, INCY)
DCOPY
subroutine dla_lin_berr(N, NZ, NRHS, RES, AYB, BERR)
DLA_LIN_BERR computes a component-wise relative backward error. (A simplified sketch of this computation follows the list below.)
subroutine daxpy(N, DA, DX, INCX, DY, INCY)
DAXPY
subroutine dpotrs(UPLO, N, NRHS, A, LDA, B, LDB, INFO)
DPOTRS
integer function ilauplo(UPLO)
ILAUPLO
subroutine dla_wwaddw(N, X, Y, W)
DLA_WWADDW adds a vector into a doubled-single vector. (A simplified sketch of this update follows the list below.)
subroutine dla_syamv(UPLO, N, ALPHA, A, LDA, X, INCX, BETA, Y, INCY)
DLA_SYAMV computes a matrix-vector product using a symmetric indefinite matrix to calculate error bounds.
subroutine dsymv(UPLO, N, ALPHA, A, LDA, X, INCX, BETA, Y, INCY)
DSYMV
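A note on the bounds reported through ERR_BNDS_NORM and ERR_BNDS_COMP: the listing above extrapolates the final relative change in the solution by the worst per-iteration contraction ratio observed during refinement, in the spirit of summing a geometric series. The expressions below only restate what the listing computes and assume the maximum ratio stays below one:

\[ \mathrm{err}_{\mathrm{norm}} \approx \frac{\mathtt{FINAL\_DX\_X}}{1 - \mathtt{DXRATMAX}}, \qquad \mathrm{err}_{\mathrm{comp}} \approx \frac{\mathtt{FINAL\_DZ\_Z}}{1 - \mathtt{DZRATMAX}} . \]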
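The backward error returned in BERR_OUT is the quantity DLA_LIN_BERR derives from RES and AYB, where AYB holds abs(A)*abs(Y) + abs(B) as accumulated by DLA_SYAMV in the listing. The following is a minimal sketch of that computation for a single right-hand side; the subroutine name and the exact small-denominator guard are illustrative assumptions, not the LAPACK source.

      SUBROUTINE BERR_SKETCH( N, NZ, RES, AYB, BERR )
*     Componentwise relative backward error:  max_i |RES(i)| / AYB(i).
*     NZ bounds the nonzeros per row and sizes the guard term (an
*     assumption mirroring the NZ argument of DLA_LIN_BERR).
      INTEGER            N, NZ, I
      DOUBLE PRECISION   RES( * ), AYB( * ), BERR, TMP, SAFE1
      DOUBLE PRECISION   DLAMCH
      EXTERNAL           DLAMCH
*     Guard against tiny or zero denominators.
      SAFE1 = ( NZ + 1 ) * DLAMCH( 'Safe minimum' )
      BERR = 0.0D+0
      DO I = 1, N
         IF ( AYB( I ) .GE. SAFE1 ) THEN
            TMP = ABS( RES( I ) ) / AYB( I )
         ELSE
            TMP = ABS( RES( I ) ) / ( AYB( I ) + SAFE1 )
         END IF
         BERR = MAX( BERR, TMP )
      END DO
      RETURN
      END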
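Once Y_PREC_STATE reaches EXTRA_Y, the solution is carried as a doubled-single pair: the unevaluated sum Y + Y_TAIL represents the iterate to roughly twice working precision, which is why the listing switches from DAXPY to DLA_WWADDW when applying the correction. Below is a minimal sketch of adding a correction W into such a pair with a fast two-sum; the subroutine name is made up for illustration, and the LAPACK routine performs a slightly different, more careful update.

      SUBROUTINE WWADDW_SKETCH( N, X, XTAIL, W )
*     Add W into the doubled-single vector (X, XTAIL), keeping the
*     rounding error of each head addition in the tail.
      INTEGER            N, I
      DOUBLE PRECISION   X( * ), XTAIL( * ), W( * ), S
      DO I = 1, N
*        Head of the new sum in working precision.
         S = X( I ) + W( I )
*        Fast two-sum error term; exact when |X(I)| >= |W(I)|.
         XTAIL( I ) = ( ( X( I ) - S ) + W( I ) ) + XTAIL( I )
         X( I ) = S
      END DO
      RETURN
      END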