      SUBROUTINE ZLA_GERFSX_EXTENDED( PREC_TYPE, TRANS_TYPE, N, NRHS,
     $                   A, LDA, AF, LDAF, IPIV, COLEQU, C, B,
     $                   LDB, Y, LDY, BERR_OUT, N_NORMS,
     $                   ERRS_N, ERRS_C, RES, AYB, DY,
     $                   Y_TAIL, RCOND, ITHRESH, RTHRESH,
     $                   DZ_UB, IGNORE_CWISE, INFO )
      INTEGER            INFO, LDA, LDAF, LDB, LDY, N, NRHS, PREC_TYPE,
     $                   TRANS_TYPE, N_NORMS
      LOGICAL            COLEQU, IGNORE_CWISE
      DOUBLE PRECISION   RTHRESH, DZ_UB
      COMPLEX*16         A( LDA, * ), AF( LDAF, * ), B( LDB, * ),
     $                   Y( LDY, * ), RES( * ), DY( * ), Y_TAIL( * )
      DOUBLE PRECISION   C( * ), AYB( * ), RCOND, BERR_OUT( * ),
     $                   ERRS_N( NRHS, * ), ERRS_C( NRHS, * )
      INTEGER            CNT, I, J, X_STATE, Z_STATE, Y_PREC_STATE
      DOUBLE PRECISION   YK, DYK, YMIN, NORMY, NORMX, NORMDX, DXRAT,
     $                   DZRAT, PREVNORMDX, PREV_DZ_Z, DXRATMAX,
     $                   DZRATMAX, DX_X, DZ_Z, FINAL_DX_X, FINAL_DZ_Z,
     $                   EPS, HUGEVAL, INCR_THRESH
      INTEGER            UNSTABLE_STATE, WORKING_STATE, CONV_STATE,
     $                   NOPROG_STATE, BASE_RESIDUAL, EXTRA_RESIDUAL,
     $                   EXTRA_Y
      PARAMETER          ( UNSTABLE_STATE = 0, WORKING_STATE = 1,
     $                   CONV_STATE = 2, NOPROG_STATE = 3 )
      PARAMETER          ( BASE_RESIDUAL = 0, EXTRA_RESIDUAL = 1,
     $                   EXTRA_Y = 2 )
      INTEGER            FINAL_NRM_ERR_I, FINAL_CMP_ERR_I, BERR_I
      INTEGER            RCOND_I, NRM_RCOND_I, NRM_ERR_I, CMP_RCOND_I
      INTEGER            CMP_ERR_I, PIV_GROWTH_I
      PARAMETER          ( FINAL_NRM_ERR_I = 1, FINAL_CMP_ERR_I = 2,
     $                   BERR_I = 3 )
      PARAMETER          ( RCOND_I = 4, NRM_RCOND_I = 5, NRM_ERR_I = 6 )
      PARAMETER          ( CMP_RCOND_I = 7, CMP_ERR_I = 8,
     $                   PIV_GROWTH_I = 9 )
      INTEGER            LA_LINRX_ITREF_I, LA_LINRX_ITHRESH_I,
     $                   LA_LINRX_CWISE_I
      PARAMETER          ( LA_LINRX_ITREF_I = 1,
     $                   LA_LINRX_ITHRESH_I = 2 )
      PARAMETER          ( LA_LINRX_CWISE_I = 3 )
      INTEGER            LA_LINRX_TRUST_I, LA_LINRX_ERR_I,
     $                   LA_LINRX_RCOND_I
      PARAMETER          ( LA_LINRX_TRUST_I = 1, LA_LINRX_ERR_I = 2 )
      PARAMETER          ( LA_LINRX_RCOND_I = 3 )
      DOUBLE PRECISION   DLAMCH
      CHARACTER          CHLA_TRANSTYPE
      INTRINSIC          ABS, MAX, MIN
      DOUBLE PRECISION   CABS1
      CABS1( ZDUM ) = ABS( DBLE( ZDUM ) ) + ABS( DIMAG( ZDUM ) )
      IF ( INFO.NE.0 ) RETURN
      TRANS = CHLA_TRANSTYPE( TRANS_TYPE )
      EPS = DLAMCH( 'Epsilon' )
      HUGEVAL = DLAMCH( 'Overflow' )
*     Force HUGEVAL to Inf.
      HUGEVAL = HUGEVAL * HUGEVAL
*     Using HUGEVAL may lead to spurious underflows.
      INCR_THRESH = DBLE( N ) * EPS
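*
*     Start the refinement with an extra-precisely computed residual
*     but a working-precision solution vector Y (no Y_TAIL yet).
*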
      Y_PREC_STATE = EXTRA_RESIDUAL
      IF ( Y_PREC_STATE .EQ. EXTRA_Y ) THEN
         DO I = 1, N
            Y_TAIL( I ) = 0.0D+0
         END DO
      END IF
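*
*     Refine each right-hand side in turn, tracking the normwise
*     (X_STATE) and componentwise (Z_STATE) iterations separately.
*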
         X_STATE = WORKING_STATE
         Z_STATE = UNSTABLE_STATE
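*
*        Compute the residual RES = B - op(A)*Y, where op(A) is A,
*        A**T, or A**H according to TRANS, in working precision or
*        in extra precision depending on Y_PREC_STATE.
*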
         CALL ZCOPY( N, B( 1, J ), 1, RES, 1 )
         IF ( Y_PREC_STATE .EQ. BASE_RESIDUAL ) THEN
            CALL ZGEMV( TRANS, N, N, (-1.0D+0,0.0D+0), A, LDA,
     $           Y( 1, J ), 1, (1.0D+0,0.0D+0), RES, 1 )
         ELSE IF ( Y_PREC_STATE .EQ. EXTRA_RESIDUAL ) THEN
            CALL BLAS_ZGEMV_X( TRANS_TYPE, N, N, (-1.0D+0,0.0D+0), A,
     $           LDA, Y( 1, J ), 1, (1.0D+0,0.0D+0),
     $           RES, 1, PREC_TYPE )
         ELSE
            CALL BLAS_ZGEMV2_X( TRANS_TYPE, N, N, (-1.0D+0,0.0D+0),
     $           A, LDA, Y( 1, J ), Y_TAIL, 1, (1.0D+0,0.0D+0), RES, 1,
     $           PREC_TYPE )
         END IF
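*
*        Solve op(A)*DY = RES using the LU factors in AF and the
*        pivots in IPIV; the residual is first copied into DY.
*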
         CALL ZCOPY( N, RES, 1, DY, 1 )
         CALL ZGETRS( TRANS, N, 1, AF, LDAF, IPIV, DY, N, INFO )
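*
*        Measure each solution component Y(I,J) against its correction
*        DY(I) to build the componentwise change DZ_Z and the (possibly
*        column-scaled) norms of Y and DY.
*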
            YK = CABS1( Y( I, J ) )
            DYK = CABS1( DY( I ) )
            IF ( YK .NE. 0.0D+0 ) THEN
               DZ_Z = MAX( DZ_Z, DYK / YK )
            ELSE IF ( DYK .NE. 0.0D+0 ) THEN
               DZ_Z = HUGEVAL
            END IF

            YMIN = MIN( YMIN, YK )

            NORMY = MAX( NORMY, YK )

            IF ( COLEQU ) THEN
               NORMX = MAX( NORMX, YK * C( I ) )
               NORMDX = MAX( NORMDX, DYK * C( I ) )
            ELSE
               NORMX = NORMY
               NORMDX = MAX( NORMDX, DYK )
            END IF
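*
*        Normwise relative change for this step, guarded against a zero
*        denominator, and the step-to-step ratios used to detect
*        stagnation of the two iterations.
*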
         IF ( NORMX .NE. 0.0D+0 ) THEN
            DX_X = NORMDX / NORMX
         ELSE IF ( NORMDX .EQ. 0.0D+0 ) THEN
            DX_X = 0.0D+0
         ELSE
            DX_X = HUGEVAL
         END IF

         DXRAT = NORMDX / PREVNORMDX
         DZRAT = DZ_Z / PREV_DZ_Z
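*
*        Request more internal precision when the componentwise error
*        can still be reduced, i.e. when YMIN*RCOND is small relative
*        to NORMY and the doubled-Y state has not yet been reached.
*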
         IF ( .NOT.IGNORE_CWISE
     $        .AND. YMIN*RCOND .LT. INCR_THRESH*NORMY
     $        .AND. Y_PREC_STATE .LT. EXTRA_Y )
     $        INCR_PREC = .TRUE.
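*
*        Normwise state machine: converged once DX_X <= EPS; when the
*        ratio DXRAT exceeds RTHRESH, either ask for extra precision
*        or declare that no further progress is being made.
*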
         IF ( X_STATE .EQ. NOPROG_STATE .AND. DXRAT .LE. RTHRESH )
     $        X_STATE = WORKING_STATE
         IF ( X_STATE .EQ. WORKING_STATE ) THEN
            IF ( DX_X .LE. EPS ) THEN
               X_STATE = CONV_STATE
            ELSE IF ( DXRAT .GT. RTHRESH ) THEN
               IF ( Y_PREC_STATE .NE. EXTRA_Y ) THEN
                  INCR_PREC = .TRUE.
               ELSE
                  X_STATE = NOPROG_STATE
               END IF
            ELSE
               IF ( DXRAT .GT. DXRATMAX ) DXRATMAX = DXRAT
            END IF
            IF ( X_STATE .GT. WORKING_STATE ) FINAL_DX_X = DX_X
         END IF
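*
*        Componentwise state machine: same pattern as above, with an
*        additional unstable state while DZ_Z exceeds DZ_UB.
*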
         IF ( Z_STATE .EQ. UNSTABLE_STATE .AND. DZ_Z .LE. DZ_UB )
     $        Z_STATE = WORKING_STATE
         IF ( Z_STATE .EQ. NOPROG_STATE .AND. DZRAT .LE. RTHRESH )
     $        Z_STATE = WORKING_STATE
         IF ( Z_STATE .EQ. WORKING_STATE ) THEN
            IF ( DZ_Z .LE. EPS ) THEN
               Z_STATE = CONV_STATE
            ELSE IF ( DZ_Z .GT. DZ_UB ) THEN
               Z_STATE = UNSTABLE_STATE
            ELSE IF ( DZRAT .GT. RTHRESH ) THEN
               IF ( Y_PREC_STATE .NE. EXTRA_Y ) THEN
                  INCR_PREC = .TRUE.
               ELSE
                  Z_STATE = NOPROG_STATE
               END IF
            ELSE
               IF ( DZRAT .GT. DZRATMAX ) DZRATMAX = DZRAT
            END IF
            IF ( Z_STATE .GT. WORKING_STATE ) FINAL_DZ_Z = DZ_Z
         END IF
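*
*        Leave the refinement loop (label 666) once neither iteration
*        is still making progress; the componentwise tests are skipped
*        when IGNORE_CWISE is set.
*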
         IF ( X_STATE.NE.WORKING_STATE ) THEN
            IF ( IGNORE_CWISE ) GOTO 666
            IF ( Z_STATE.EQ.NOPROG_STATE .OR. Z_STATE.EQ.CONV_STATE )
     $           GOTO 666
            IF ( Z_STATE.EQ.UNSTABLE_STATE .AND. CNT.GT.1 ) GOTO 666
         END IF
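*
*        If extra precision was requested, advance Y_PREC_STATE one
*        step: working residual, then extra-precise residual, then
*        doubled (head/tail) Y.
*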
         IF ( INCR_PREC ) THEN
            INCR_PREC = .FALSE.
            Y_PREC_STATE = Y_PREC_STATE + 1
         END IF
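*
*        Apply the correction: plain ZAXPY while Y is kept in working
*        precision, ZLA_WWADDW on the (Y, Y_TAIL) pair once the
*        doubled-Y state is active.
*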
         IF ( Y_PREC_STATE .LT. EXTRA_Y ) THEN
            CALL ZAXPY( N, (1.0D+0,0.0D+0), DY, 1, Y( 1, J ), 1 )
         ELSE
            CALL ZLA_WWADDW( N, Y( 1, J ), Y_TAIL, DY )
         END IF
         IF ( X_STATE .EQ. WORKING_STATE ) FINAL_DX_X = DX_X
         IF ( Z_STATE .EQ. WORKING_STATE ) FINAL_DZ_Z = DZ_Z
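*
*        Report the final error estimates for this right-hand side,
*        inflated by 1/(1 - ratio) to account for the error that the
*        remaining, unperformed iterations would have removed.
*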
         IF ( N_NORMS .GE. 1 ) THEN
            ERRS_N( J, LA_LINRX_ERR_I ) = FINAL_DX_X / (1 - DXRATMAX)
         END IF
         IF ( N_NORMS .GE. 2 ) THEN
            ERRS_C( J, LA_LINRX_ERR_I ) = FINAL_DZ_Z / (1 - DZRATMAX)
         END IF
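*
*        Recompute the residual and build AYB(I) = |B(I,J)| +
*        (|op(A)|*|Y|)(I), the denominator of the componentwise
*        backward error max over I of |RES(I)| / AYB(I) that is
*        returned in BERR_OUT (see ZLA_LIN_BERR below).
*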
         CALL ZCOPY( N, B( 1, J ), 1, RES, 1 )
         CALL ZGEMV( TRANS, N, N, (-1.0D+0,0.0D+0), A, LDA, Y( 1, J ),
     $        1, (1.0D+0,0.0D+0), RES, 1 )

         DO I = 1, N
            AYB( I ) = CABS1( B( I, J ) )
         END DO

         CALL ZLA_GEAMV( TRANS_TYPE, N, N, 1.0D+0,
     $        A, LDA, Y( 1, J ), 1, 1.0D+0, AYB, 1 )
References:

      subroutine zaxpy(n, za, zx, incx, zy, incy)
          ZAXPY
      subroutine zcopy(n, zx, incx, zy, incy)
          ZCOPY
      subroutine zgemv(trans, m, n, alpha, a, lda, x, incx, beta, y, incy)
          ZGEMV
      subroutine zgetrs(trans, n, nrhs, a, lda, ipiv, b, ldb, info)
          ZGETRS
      subroutine zla_geamv(trans, m, n, alpha, a, lda, x, incx, beta, y, incy)
          ZLA_GEAMV computes a matrix-vector product using a general matrix to calculate error bounds.
      subroutine zla_gerfsx_extended(prec_type, trans_type, n, nrhs, a, lda, af, ldaf, ipiv, colequ, c, b, ldb, y, ldy, berr_out, n_norms, errs_n, errs_c, res, ayb, dy, y_tail, rcond, ithresh, rthresh, dz_ub, ignore_cwise, info)
          ZLA_GERFSX_EXTENDED
      subroutine zla_lin_berr(n, nz, nrhs, res, ayb, berr)
          ZLA_LIN_BERR computes a component-wise relative backward error.
      character*1 function chla_transtype(trans)
          CHLA_TRANSTYPE
      subroutine zla_wwaddw(n, x, y, w)
          ZLA_WWADDW adds a vector into a doubled-single vector.
      double precision function dlamch(cmach)
          DLAMCH
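The one-line description of zla_wwaddw above refers to the head/tail ("doubled") representation the refinement switches to once Y_PREC_STATE reaches EXTRA_Y: the solution is carried as a working-precision head Y plus a tail Y_TAIL, and each correction DY is folded into that pair without discarding its low-order bits. The fragment below is a minimal, self-contained sketch of that style of update written for this page; it is not the library routine, it assumes round-to-nearest double precision arithmetic with the head dominating the added term (the classical Fast2Sum situation), and the real routine may differ in details such as forcing the head sum back to working precision.

      PROGRAM WWADDW_SKETCH
*     Illustrative doubled-working-precision update in the spirit of
*     ZLA_WWADDW:  (X, XTAIL) <- (X, XTAIL) + W.  The rounding error
*     of the head addition is recovered and accumulated in the tail.
      INTEGER            N, I
      PARAMETER          ( N = 2 )
      COMPLEX*16         X( N ), XTAIL( N ), W( N ), S
      DO I = 1, N
         X( I ) = ( 1.0D+0, -1.0D+0 )
         XTAIL( I ) = ( 0.0D+0, 0.0D+0 )
         W( I ) = ( 1.0D-17, 1.0D-17 )
      END DO
      DO I = 1, N
*        Head of the sum; the low-order bits of X(I) + W(I) are lost.
         S = X( I ) + W( I )
*        Recover the lost part and fold it into the tail, then store
*        the new head.
         XTAIL( I ) = ( ( X( I ) - S ) + W( I ) ) + XTAIL( I )
         X( I ) = S
      END DO
      WRITE( *, * ) 'HEAD:', X( 1 )
      WRITE( *, * ) 'TAIL:', XTAIL( 1 )
      END

With the values above the head stays at (1.0, -1.0) while the tail picks up roughly (1.0D-17, 1.0D-17), a contribution that a plain Y := Y + DY update would have rounded away.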