      SUBROUTINE DLA_PORFSX_EXTENDED( PREC_TYPE, UPLO, N, NRHS, A, LDA,
     $                                AF, LDAF, COLEQU, C, B, LDB, Y,
     $                                LDY, BERR_OUT, N_NORMS,
     $                                ERR_BNDS_NORM, ERR_BNDS_COMP, RES,
     $                                AYB, DY, Y_TAIL, RCOND, ITHRESH,
     $                                RTHRESH, DZ_UB, IGNORE_CWISE,
     $                                INFO )
      INTEGER            INFO, LDA, LDAF, LDB, LDY, N, NRHS, PREC_TYPE,
     $                   N_NORMS, ITHRESH
      CHARACTER          UPLO
      LOGICAL            COLEQU, IGNORE_CWISE
      DOUBLE PRECISION   RTHRESH, DZ_UB
      DOUBLE PRECISION   A( LDA, * ), AF( LDAF, * ), B( LDB, * ),
     $                   Y( LDY, * ), RES( * ), DY( * ), Y_TAIL( * )
      DOUBLE PRECISION   C( * ), AYB( * ), RCOND, BERR_OUT( * ),
     $                   ERR_BNDS_NORM( NRHS, * ),
     $                   ERR_BNDS_COMP( NRHS, * )
      INTEGER            UPLO2, CNT, I, J, X_STATE, Z_STATE
      DOUBLE PRECISION   YK, DYK, YMIN, NORMY, NORMX, NORMDX, DXRAT,
     $                   DZRAT, PREVNORMDX, PREV_DZ_Z, DXRATMAX,
     $                   DZRATMAX, DX_X, DZ_Z, FINAL_DX_X, FINAL_DZ_Z,
     $                   EPS, HUGEVAL, INCR_THRESH
      LOGICAL            INCR_PREC
      INTEGER            UNSTABLE_STATE, WORKING_STATE, CONV_STATE,
     $                   NOPROG_STATE, Y_PREC_STATE, BASE_RESIDUAL,
     $                   EXTRA_RESIDUAL, EXTRA_Y
      parameter( unstable_state = 0, working_state = 1,
     $           conv_state = 2, noprog_state = 3 )
      parameter( base_residual = 0, extra_residual = 1,
     $           extra_y = 2 )
      INTEGER            FINAL_NRM_ERR_I, FINAL_CMP_ERR_I, BERR_I
      INTEGER            RCOND_I, NRM_RCOND_I, NRM_ERR_I, CMP_RCOND_I
      INTEGER            CMP_ERR_I, PIV_GROWTH_I
      parameter( final_nrm_err_i = 1, final_cmp_err_i = 2,
     $           berr_i = 3 )
      parameter( rcond_i = 4, nrm_rcond_i = 5, nrm_err_i = 6 )
      parameter( cmp_rcond_i = 7, cmp_err_i = 8,
     $           piv_growth_i = 9 )
      INTEGER            LA_LINRX_ITREF_I, LA_LINRX_ITHRESH_I,
     $                   LA_LINRX_CWISE_I
      parameter( la_linrx_itref_i = 1,
     $           la_linrx_ithresh_i = 2 )
      parameter( la_linrx_cwise_i = 3 )
      INTEGER            LA_LINRX_TRUST_I, LA_LINRX_ERR_I,
     $                   LA_LINRX_RCOND_I
      parameter( la_linrx_trust_i = 1, la_linrx_err_i = 2 )
      parameter( la_linrx_rcond_i = 3 )
      DOUBLE PRECISION   DLAMCH
      INTRINSIC          abs, max, min
      IF ( info.NE.0 ) RETURN
      eps = dlamch( 'Epsilon' )
      hugeval = dlamch( 'Overflow' )
      hugeval = hugeval * hugeval
      incr_thresh = dble( n ) * eps
      IF ( lsame( uplo, 'L' ) ) THEN
         uplo2 = ilauplo( 'L' )
      ELSE
         uplo2 = ilauplo( 'U' )
      END IF
      y_prec_state = extra_residual
      IF ( y_prec_state .EQ. extra_y ) THEN
      x_state = working_state
      z_state = unstable_state
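*     Compute the residual RES = B - A*Y in the precision selected by
*     Y_PREC_STATE (working precision, extra-precise accumulation, or
*     extra precision with the doubled solution Y + Y_TAIL).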
      CALL dcopy( n, b( 1, j ), 1, res, 1 )
      IF ( y_prec_state .EQ. base_residual ) THEN
         CALL dsymv( uplo, n, -1.0d+0, a, lda, y( 1, j ), 1,
     $        1.0d+0, res, 1 )
      ELSE IF ( y_prec_state .EQ. extra_residual ) THEN
         CALL blas_dsymv_x( uplo2, n, -1.0d+0, a, lda,
     $        y( 1, j ), 1, 1.0d+0, res, 1, prec_type )
      ELSE
         CALL blas_dsymv2_x( uplo2, n, -1.0d+0, a, lda,
     $        y( 1, j ), y_tail, 1, 1.0d+0, res, 1, prec_type )
      END IF
      CALL dcopy( n, res, 1, dy, 1 )
      CALL dpotrs( uplo, n, 1, af, ldaf, dy, n, info )
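*     Measure the size of the correction DY relative to the current
*     solution, componentwise and normwise, to form DZ_Z and DX_X.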
      yk = abs( y( i, j ) )
      dyk = abs( dy( i ) )
      IF ( yk .NE. 0.0d+0 ) THEN
         dz_z = max( dz_z, dyk / yk )
      ELSE IF ( dyk .NE. 0.0d+0 ) THEN
         dz_z = hugeval
      END IF
      ymin = min( ymin, yk )
      normy = max( normy, yk )
      IF ( colequ ) THEN
         normx = max( normx, yk * c( i ) )
         normdx = max( normdx, dyk * c( i ) )
      ELSE
         normx = normy
         normdx = max( normdx, dyk )
      END IF
      IF ( normx .NE. 0.0d+0 ) THEN
         dx_x = normdx / normx
      ELSE IF ( normdx .EQ. 0.0d+0 ) THEN
         dx_x = 0.0d+0
      ELSE
         dx_x = hugeval
      END IF
      dxrat = normdx / prevnormdx
      dzrat = dz_z / prev_dz_z
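*     Update the normwise (X_STATE) and componentwise (Z_STATE)
*     progress states from the step ratios, and decide whether to
*     switch to extra-precise arithmetic for the next step.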
      IF ( ymin*rcond .LT. incr_thresh*normy
     $     .AND. y_prec_state .LT. extra_y )
     $     incr_prec = .true.
      IF ( x_state .EQ. noprog_state .AND. dxrat .LE. rthresh )
     $     x_state = working_state
      IF ( x_state .EQ. working_state ) THEN
         IF ( dx_x .LE. eps ) THEN
            x_state = conv_state
         ELSE IF ( dxrat .GT. rthresh ) THEN
            IF ( y_prec_state .NE. extra_y ) THEN
               incr_prec = .true.
            ELSE
               x_state = noprog_state
            END IF
         ELSE
            IF ( dxrat .GT. dxratmax ) dxratmax = dxrat
         END IF
         IF ( x_state .GT. working_state ) final_dx_x = dx_x
      END IF
      IF ( z_state .EQ. unstable_state .AND. dz_z .LE. dz_ub )
     $     z_state = working_state
      IF ( z_state .EQ. noprog_state .AND. dzrat .LE. rthresh )
     $     z_state = working_state
      IF ( z_state .EQ. working_state ) THEN
         IF ( dz_z .LE. eps ) THEN
            z_state = conv_state
         ELSE IF ( dz_z .GT. dz_ub ) THEN
            z_state = unstable_state
         ELSE IF ( dzrat .GT. rthresh ) THEN
            IF ( y_prec_state .NE. extra_y ) THEN
               incr_prec = .true.
            ELSE
               z_state = noprog_state
            END IF
         ELSE
            IF ( dzrat .GT. dzratmax ) dzratmax = dzrat
         END IF
         IF ( z_state .GT. working_state ) final_dz_z = dz_z
      END IF
      IF ( x_state.NE.working_state.AND.
     $     ( ignore_cwise.OR.z_state.NE.working_state ) )
      IF ( incr_prec ) THEN
         y_prec_state = y_prec_state + 1
      IF ( y_prec_state .LT. extra_y ) THEN
         CALL daxpy( n, 1.0d+0, dy, 1, y( 1, j ), 1 )
      ELSE
         CALL dla_wwaddw( n, y( 1, j ), y_tail, dy )
      END IF
      IF ( x_state .EQ. working_state ) final_dx_x = dx_x
      IF ( z_state .EQ. working_state ) final_dz_z = dz_z
      IF ( n_norms .GE. 1 ) THEN
         err_bnds_norm( j, la_linrx_err_i ) =
     $        final_dx_x / (1 - dxratmax)
      END IF
      IF ( n_norms .GE. 2 ) THEN
         err_bnds_comp( j, la_linrx_err_i ) =
     $        final_dz_z / (1 - dzratmax)
      END IF
      CALL dcopy( n, b( 1, j ), 1, res, 1 )
      CALL dsymv( uplo, n, -1.0d+0, a, lda, y( 1, j ), 1, 1.0d+0, res,
     $     1 )
      DO i = 1, n
         ayb( i ) = abs( b( i, j ) )
      END DO
      CALL dla_syamv( uplo2, n, 1.0d+0,
     $     a, lda, y( 1, j ), 1, 1.0d+0, ayb, 1 )
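In the full routine, RES (the residual) and AYB (which now holds abs(B) + abs(A)*abs(Y)) are passed to DLA_LIN_BERR to form the component-wise relative backward error, essentially berr(j) = max over i of |res(i)| / ayb(i). The following is only a hedged sketch of that reduction for one right-hand side, under a hypothetical name; the library routine additionally safeguards small denominators against division problems.

      DOUBLE PRECISION FUNCTION BERR_SKETCH( N, RES, AYB )
*     Hedged sketch, not the library source: the essence of the
*     backward-error reduction performed by DLA_LIN_BERR.
      INTEGER            N, I
      DOUBLE PRECISION   RES( * ), AYB( * ), TMP
      BERR_SKETCH = 0.0D+0
      DO I = 1, N
*        Skip zero denominators; the library handles them separately.
         IF ( AYB( I ) .NE. 0.0D+0 ) THEN
            TMP = ABS( RES( I ) ) / AYB( I )
            BERR_SKETCH = MAX( BERR_SKETCH, TMP )
         END IF
      END DO
      RETURN
      END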
Routines referenced:

subroutine daxpy(n, da, dx, incx, dy, incy)
DAXPY adds a constant times a vector to a vector.
subroutine dcopy(n, dx, incx, dy, incy)
DCOPY copies a vector x to a vector y.
subroutine dsymv(uplo, n, alpha, a, lda, x, incx, beta, y, incy)
DSYMV performs the symmetric matrix-vector operation y := alpha*A*x + beta*y.
subroutine dla_syamv(uplo, n, alpha, a, lda, x, incx, beta, y, incy)
DLA_SYAMV computes a matrix-vector product using a symmetric indefinite matrix to calculate error bounds.
subroutine dla_lin_berr(n, nz, nrhs, res, ayb, berr)
DLA_LIN_BERR computes a component-wise relative backward error.
subroutine dla_porfsx_extended(prec_type, uplo, n, nrhs, a, lda, af, ldaf, colequ, c, b, ldb, y, ldy, berr_out, n_norms, err_bnds_norm, err_bnds_comp, res, ayb, dy, y_tail, rcond, ithresh, rthresh, dz_ub, ignore_cwise, info)
DLA_PORFSX_EXTENDED improves the computed solution to a system of linear equations for symmetric or Hermitian positive-definite matrices by performing extra-precise iterative refinement.
subroutine dla_wwaddw(n, x, y, w)
DLA_WWADDW adds a vector into a doubled-single vector.
subroutine dpotrs(uplo, n, nrhs, a, lda, b, ldb, info)
DPOTRS solves a system of linear equations A*X = B with a symmetric positive definite matrix A, using the Cholesky factorization computed by DPOTRF.
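The "doubled-single vector" mentioned for DLA_WWADDW is the pair (Y, Y_TAIL) used above once Y_PREC_STATE reaches EXTRA_Y: the leading part plus a tail that carries rounding error. Below is only a hedged sketch of that kind of update, under a hypothetical name, using the standard two-sum style compensation (exact when the leading part dominates the addend); it is not the library source.

      SUBROUTINE WWADDW_SKETCH( N, X, Y, W )
*     Hedged sketch, not the LAPACK source: add W into the
*     doubled-single pair (X, Y), folding the rounding error of each
*     addition into the tail Y.
      INTEGER            N, I
      DOUBLE PRECISION   X( * ), Y( * ), W( * ), S
      DO I = 1, N
*        Leading part of the sum.
         S = X( I ) + W( I )
*        Rounding error of the addition, accumulated into the tail.
         Y( I ) = ( ( X( I ) - S ) + W( I ) ) + Y( I )
         X( I ) = S
      END DO
      RETURN
      END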