+* Implemented Tree-Reweighted BP
* Implemented various heuristics for choosing a variable elimination sequence in JTree
* Added BETHE method for GBP/HAK cluster choice
* \note There are two implementations, an optimized one (the default) which caches IndexFor objects,
* and a slower, less complicated one which is easier to maintain/understand. The slower one can be
* enabled by defining DAI_BP_FAST as false in the source file.
+ *
+ * \todo Merge duplicate code in calcNewMessage() and calcBeliefF()
*/
class BP : public DAIAlgFG {
protected:
*
* \todo Implement routines for UAI probabilistic inference evaluation data
*
+ * \todo Improve SWIG interfaces
+ *
* \idea Adapt (part of the) guidelines in http://www.boost.org/development/requirements.html#Design_and_Programming
*
* \idea Use "gcc -MM" to generate dependencies for targets: http://make.paulandlesley.org/autodep.html
* - Mean Field;
* - Loopy Belief Propagation [\ref KFL01];
* - Fractional Belief Propagation [\ref WiH03];
+ * - Tree-Reweighted Belief Propagation [\ref WJW03];
* - Tree Expectation Propagation [\ref MiQ04];
* - Generalized Belief Propagation [\ref YFW05];
* - Double-loop GBP [\ref HAK03];
* Approximate inference:
* - Mean Field: dai::MF
* - (Loopy) Belief Propagation: dai::BP [\ref KFL01]
+ * - Fractional Belief Propagation: dai::FBP [\ref WiH03]
+ * - Tree-Reweighted Belief Propagation: dai::TRWBP [\ref WJW03]
* - Tree Expectation Propagation: dai::TreeEP [\ref MiQ04]
* - Generalized Belief Propagation: dai::HAK [\ref YFW05]
* - Double-loop GBP: dai::HAK [\ref HAK03]
/// \file
/// \brief Defines classes related to Expectation Maximization (EMAlg, ParameterEstimation, CondProbEstimation and SharedParameters)
+/// \todo Implement parameter estimation for undirected models / factor graphs.
namespace dai {
/// Approximate inference algorithm "Fractional Belief Propagation" [\ref WiH03]
/** The Fractional Belief Propagation algorithm is like Belief
- * Propagation, but associates each factor with a scale parameter
+ * Propagation, but associates each factor with a weight (scale parameter)
* which controls the divergence measure being minimized. Standard
- * Belief Propagation corresponds to the case of FBP where each scale
- * parameter is 1. When cast as an EP algorithm, BP (and EP) minimize
+ * Belief Propagation corresponds to the case of FBP where each weight
+ * is 1. When cast as an EP algorithm, BP (and EP) minimize
* the inclusive KL-divergence, i.e. \f$\min_q KL(p||q)\f$ (note that the
* Bethe free energy is typically derived from \f$ KL(q||p) \f$). If each
- * factor \a I has scale parameter \f$ c_I \f$, then FBP minimizes the
+ * factor \a I has weight \f$ c_I \f$, then FBP minimizes the
* alpha-divergence with \f$ \alpha=1/c_I \f$ for that factor, which also
* corresponds to Power EP [\ref Min05].
*
 * The variable beliefs are calculated by:
 * \f[ b_i(x_i) \propto \prod_{I\in N_i} m_{I\to i} \f]
* and the factor beliefs are calculated by:
* \f[ b_I(x_I) \propto f_I(x_I)^{1/c_I} \prod_{j \in N_I} m_{I\to j}^{1-1/c_I} \prod_{J\in N_j\setminus\{I\}} m_{J\to j} \f]
+ * The logarithm of the partition sum is approximated by:
+ * \f[ \log Z = \sum_{I} \sum_{x_I} b_I(x_I) \big( \log f_I(x_I) - c_I \log b_I(x_I) \big) + \sum_{i} (c_i - 1) \sum_{x_i} b_i(x_i) \log b_i(x_i) \f]
+ * where the variable weights are defined as
+ * \f[ c_i := \sum_{I \in N_i} c_I \f]
*
- * \todo Add nice way to set scale parameters
+ * \todo Add nice way to set weights
+ * \todo Merge duplicate code in calcNewMessage() and calcBeliefF()
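+ *
+ * A minimal usage sketch (the factor graph \a fg, the property values and the
+ * uniform weight 0.8 are illustrative assumptions only; see BP::Properties for
+ * the options accepted by the underlying BP implementation):
+ * \code
+ * using namespace dai;
+ * // fg is an existing FactorGraph
+ * FBP fbp( fg, PropertySet()("maxiter",(size_t)1000)("tol",(Real)1e-9)
+ *                           ("updates",std::string("SEQRND"))("logdomain",false) );
+ * fbp.setWeights( std::vector<Real>( fg.nrFactors(), 0.8 ) );  // same weight c_I for all factors
+ * fbp.init();
+ * fbp.run();
+ * Real approxLogZ = fbp.logZ();  // uses the weighted approximation given above
+ * \endcode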
* \author Frederik Eaton
*/
class FBP : public BP {
protected:
- /// Factor scale parameters (indexed by factor ID)
- std::vector<Real> _scale_factor;
+ /// Factor weights (indexed by factor ID)
+ std::vector<Real> _weight;
public:
/// Name of this inference algorithm
/// \name Constructors/destructors
//@{
/// Default constructor
- FBP() : BP(), _scale_factor() {}
+ FBP() : BP(), _weight() {}
/// Construct from FactorGraph \a fg and PropertySet \a opts
/** \param opts Parameters @see BP::Properties
*/
- FBP( const FactorGraph &fg, const PropertySet &opts ) : BP(fg, opts), _scale_factor() {
+ FBP( const FactorGraph &fg, const PropertySet &opts ) : BP(fg, opts), _weight() {
setProperties( opts );
construct();
}
virtual Real logZ() const;
//@}
- /// \name FBP accessors/mutators for scale parameters
+ /// \name FBP accessors/mutators for weights
//@{
- /// Returns scale parameter of the \a I 'th factor
- Real scaleF( size_t I ) const { return _scale_factor[I]; }
+ /// Returns weight of the \a I 'th factor
+ Real Weight( size_t I ) const { return _weight[I]; }
- /// Returns constant reference to vector of all factor scale parameters
- const std::vector<Real>& scaleFs() const { return _scale_factor; }
+ /// Returns constant reference to vector of all factor weights
+ const std::vector<Real>& Weights() const { return _weight; }
- /// Sets the scale parameter of the \a I 'th factor to \a c
- void setScaleF( size_t I, Real c ) { _scale_factor[I] = c; }
+ /// Sets the weight of the \a I 'th factor to \a c
+ void setWeight( size_t I, Real c ) { _weight[I] = c; }
- /// Sets the scale parameters of all factors simultaenously
- /** \note Faster than calling setScaleF(size_t,Real) for each factor
+        /// Sets the weights of all factors simultaneously
+ /** \note Faster than calling setWeight(size_t,Real) for each factor
*/
- void setScaleFs( const std::vector<Real> &c ) { _scale_factor = c; }
+ void setWeights( const std::vector<Real> &c ) { _weight = c; }
protected:
// Calculate the updated message from the \a _I 'th neighbor of variable \a i to variable \a i
 * The variable beliefs are calculated by:
 * \f[ b_i(x_i) \propto \prod_{I\in N_i} m_{I\to i}^{c_I} \f]
* and the factor beliefs are calculated by:
* \f[ b_I(x_I) \propto f_I(x_I)^{1/c_I} \prod_{j \in N_I} m_{I\to j}^{c_I-1} \prod_{J\in N_j\setminus\{I\}} m_{J\to j}^{c_J} \f]
+ * The logarithm of the partition sum is approximated by:
+ * \f[ \log Z = \sum_{I} \sum_{x_I} b_I(x_I) \big( \log f_I(x_I) - c_I \log b_I(x_I) \big) + \sum_{i} (c_i - 1) \sum_{x_i} b_i(x_i) \log b_i(x_i) \f]
+ * where the variable weights are defined as
+ * \f[ c_i := \sum_{I \in N_i} c_I \f]
*
- * \todo Fix documentation
+ * \note TRWBP is equivalent to FBP: the TRWBP messages correspond to the FBP
+ * messages raised to the power \f$ 1/c_I \f$, and the resulting beliefs and
+ * log partition sum approximation coincide.
+ * \todo Merge duplicate code in calcNewMessage() and calcBeliefF()
+ * \todo Add nice way to set weights
+ * \todo Merge code of FBP and TRWBP
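+ *
+ * A minimal sketch of setting the weights with the current interface (the factor
+ * graph \a fg, the property values and the pairwise weight 0.5 are illustrative
+ * assumptions; in [\ref WJW03] the pairwise weights should be valid edge
+ * appearance probabilities of a distribution over spanning trees):
+ * \code
+ * using namespace dai;
+ * TRWBP trwbp( fg, PropertySet()("maxiter",(size_t)1000)("tol",(Real)1e-9)
+ *                               ("updates",std::string("SEQRND"))("logdomain",false) );
+ * std::vector<Real> w( fg.nrFactors() );
+ * for( size_t I = 0; I < fg.nrFactors(); I++ )
+ *     w[I] = (fg.factor(I).vars().size() == 1) ? 1.0 : 0.5;  // weight 1 for unary factors
+ * trwbp.setWeights( w );
+ * trwbp.init();
+ * trwbp.run();
+ * Real approxLogZ = trwbp.logZ();
+ * \endcode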
*/
class TRWBP : public BP {
protected:
- /// Factor scale parameters (indexed by factor ID)
- std::vector<Real> _edge_weight;
+ /// "Edge weights" (indexed by factor ID)
+ /** In [\ref WJW03], only unary or pairwise factors are considered.
+ * Here we are more general by having a weight for each factor in the
+ * factor graph. If unary factors have weight 1, and higher-order factors
+ * are absent, then we have the special case considered in [\ref WJW03].
+ */
+ std::vector<Real> _weight;
public:
/// Name of this inference algorithm
/// \name Constructors/destructors
//@{
/// Default constructor
- TRWBP() : BP(), _edge_weight() {}
+ TRWBP() : BP(), _weight() {}
/// Construct from FactorGraph \a fg and PropertySet \a opts
/** \param opts Parameters @see BP::Properties
*/
- TRWBP( const FactorGraph &fg, const PropertySet &opts ) : BP(fg, opts), _edge_weight() {
+ TRWBP( const FactorGraph &fg, const PropertySet &opts ) : BP(fg, opts), _weight() {
setProperties( opts );
construct();
}
-    /// \name TRWBP accessors/mutators for scale parameters
+    /// \name TRWBP accessors/mutators for weights
//@{
- /// Returns scale parameter of edge corresponding to the \a I 'th factor
- Real edgeWeight( size_t I ) const { return _edge_weight[I]; }
+ /// Returns weight corresponding to the \a I 'th factor
+ Real Weight( size_t I ) const { return _weight[I]; }
- /// Returns constant reference to vector of all factor scale parameters
- const std::vector<Real>& edgeWeights() const { return _edge_weight; }
+ /// Returns constant reference to vector of all weights
+ const std::vector<Real>& Weights() const { return _weight; }
- /// Sets the scale parameter of the \a I 'th factor to \a c
- void setEdgeWeight( size_t I, Real c ) { _edge_weight[I] = c; }
+ /// Sets the weight of the \a I 'th factor to \a c
+ void setWeight( size_t I, Real c ) { _weight[I] = c; }
- /// Sets the scale parameters of all factors simultaenously
- /** \note Faster than calling setScaleF(size_t,Real) for each factor
+        /// Sets the weights of all factors simultaneously
+ /** \note Faster than calling setWeight(size_t,Real) for each factor
*/
- void setEdgeWeights( const std::vector<Real> &c ) { _edge_weight = c; }
+ void setWeights( const std::vector<Real> &c ) { _weight = c; }
protected:
// Calculate the updated message from the \a _I 'th neighbor of variable \a i to variable \a i
// calculate updated message I->i
size_t I = nbV(i,_I);
- Factor Fprod( factor(I) );
- Prob &prod = Fprod.p();
- if( props.logdomain )
- prod.takeLog();
+ Prob marg;
+ if( factor(I).vars().size() == 1 ) // optimization
+ marg = factor(I).p();
+ else {
+ Factor Fprod( factor(I) );
+ Prob &prod = Fprod.p();
+ if( props.logdomain )
+ prod.takeLog();
+
+ // Calculate product of incoming messages and factor I
+ foreach( const Neighbor &j, nbF(I) )
+ if( j != i ) { // for all j in I \ i
+ // prod_j will be the product of messages coming into j
+ Prob prod_j( var(j).states(), props.logdomain ? 0.0 : 1.0 );
+ foreach( const Neighbor &J, nbV(j) )
+ if( J != I ) { // for all J in nb(j) \ I
+ if( props.logdomain )
+ prod_j += message( j, J.iter );
+ else
+ prod_j *= message( j, J.iter );
+ }
- // Calculate product of incoming messages and factor I
- foreach( const Neighbor &j, nbF(I) )
- if( j != i ) { // for all j in I \ i
- // prod_j will be the product of messages coming into j
- Prob prod_j( var(j).states(), props.logdomain ? 0.0 : 1.0 );
- foreach( const Neighbor &J, nbV(j) )
- if( J != I ) { // for all J in nb(j) \ I
+ // multiply prod with prod_j
+ if( !DAI_BP_FAST ) {
+ /* UNOPTIMIZED (SIMPLE TO READ, BUT SLOW) VERSION */
if( props.logdomain )
- prod_j += message( j, J.iter );
+ Fprod += Factor( var(j), prod_j );
else
- prod_j *= message( j, J.iter );
+ Fprod *= Factor( var(j), prod_j );
+ } else {
+ /* OPTIMIZED VERSION */
+ size_t _I = j.dual;
+ // ind is the precalculated IndexFor(j,I) i.e. to x_I == k corresponds x_j == ind[k]
+ const ind_t &ind = index(j, _I);
+ for( size_t r = 0; r < prod.size(); ++r )
+ if( props.logdomain )
+ prod[r] += prod_j[ind[r]];
+ else
+ prod[r] *= prod_j[ind[r]];
}
-
- // multiply prod with prod_j
- if( !DAI_BP_FAST ) {
- /* UNOPTIMIZED (SIMPLE TO READ, BUT SLOW) VERSION */
- if( props.logdomain )
- Fprod += Factor( var(j), prod_j );
- else
- Fprod *= Factor( var(j), prod_j );
- } else {
- /* OPTIMIZED VERSION */
- size_t _I = j.dual;
- // ind is the precalculated IndexFor(j,I) i.e. to x_I == k corresponds x_j == ind[k]
- const ind_t &ind = index(j, _I);
- for( size_t r = 0; r < prod.size(); ++r )
- if( props.logdomain )
- prod[r] += prod_j[ind[r]];
- else
- prod[r] *= prod_j[ind[r]];
}
- }
- if( props.logdomain ) {
- prod -= prod.max();
- prod.takeExp();
- }
+ if( props.logdomain ) {
+ prod -= prod.max();
+ prod.takeExp();
+ }
- // Marginalize onto i
- Prob marg;
- if( !DAI_BP_FAST ) {
- /* UNOPTIMIZED (SIMPLE TO READ, BUT SLOW) VERSION */
- if( props.inference == Properties::InfType::SUMPROD )
- marg = Fprod.marginal( var(i) ).p();
- else
- marg = Fprod.maxMarginal( var(i) ).p();
- } else {
- /* OPTIMIZED VERSION */
- marg = Prob( var(i).states(), 0.0 );
- // ind is the precalculated IndexFor(i,I) i.e. to x_I == k corresponds x_i == ind[k]
- const ind_t ind = index(i,_I);
- if( props.inference == Properties::InfType::SUMPROD )
- for( size_t r = 0; r < prod.size(); ++r )
- marg[ind[r]] += prod[r];
- else
- for( size_t r = 0; r < prod.size(); ++r )
- if( prod[r] > marg[ind[r]] )
- marg[ind[r]] = prod[r];
- marg.normalize();
+ // Marginalize onto i
+ if( !DAI_BP_FAST ) {
+ /* UNOPTIMIZED (SIMPLE TO READ, BUT SLOW) VERSION */
+ if( props.inference == Properties::InfType::SUMPROD )
+ marg = Fprod.marginal( var(i) ).p();
+ else
+ marg = Fprod.maxMarginal( var(i) ).p();
+ } else {
+ /* OPTIMIZED VERSION */
+ marg = Prob( var(i).states(), 0.0 );
+ // ind is the precalculated IndexFor(i,I) i.e. to x_I == k corresponds x_i == ind[k]
+ const ind_t ind = index(i,_I);
+ if( props.inference == Properties::InfType::SUMPROD )
+ for( size_t r = 0; r < prod.size(); ++r )
+ marg[ind[r]] += prod[r];
+ else
+ for( size_t r = 0; r < prod.size(); ++r )
+ if( prod[r] > marg[ind[r]] )
+ marg[ind[r]] = prod[r];
+ marg.normalize();
+ }
}
// Store result
Real sum = 0.0;
for( size_t I = 0; I < nrFactors(); I++ ) {
sum += (beliefF(I) * factor(I).log(true)).sum(); // FBP
- sum += scaleF(I) * beliefF(I).entropy(); // FBP
+ sum += Weight(I) * beliefF(I).entropy(); // FBP
}
for( size_t i = 0; i < nrVars(); ++i ) {
Real c_i = 0.0;
foreach( const Neighbor &I, nbV(i) )
- c_i += scaleF(I);
+ c_i += Weight(I);
sum += (1.0 - c_i) * beliefV(i).entropy(); // FBP
}
return sum;
// calculate updated message I->i
size_t I = nbV(i,_I);
- Real scale = scaleF(I); // FBP: c_I
+ Real c_I = Weight(I); // FBP: c_I
Factor Fprod( factor(I) );
Prob &prod = Fprod.p();
if( props.logdomain ) {
prod.takeLog();
- prod *= (1/scale); // FBP
+ prod /= c_I; // FBP
} else
- prod ^= (1/scale); // FBP
+ prod ^= (1.0 / c_I); // FBP
// Calculate product of incoming messages and factor I
foreach( const Neighbor &j, nbF(I) )
} else {
// FBP: multiply by m_Ij^(1-1/c_I)
if( props.logdomain )
- prod_j += message( j, J.iter )*(1-1/scale);
+ prod_j += message( j, J.iter ) * (1.0 - 1.0 / c_I);
else
- prod_j *= message( j, J.iter )^(1-1/scale);
+ prod_j *= message( j, J.iter ) ^ (1.0 - 1.0 / c_I);
}
// multiply prod with prod_j
Fprod *= Factor( var(j), prod_j );
} else {
/* OPTIMIZED VERSION */
+ size_t _I = j.dual;
// ind is the precalculated IndexFor(j,I) i.e. to x_I == k corresponds x_j == ind[k]
const ind_t &ind = index(j, _I);
for( size_t r = 0; r < prod.size(); ++r )
}
// FBP
- marg ^= scale;
+ marg ^= c_I;
// Store result
if( props.logdomain )
/* This code has been copied from bp.cpp, except where comments indicate FBP-specific behaviour */
void FBP::calcBeliefF( size_t I, Prob &p ) const {
- Real scale = scaleF(I); // FBP: c_I
+ Real c_I = Weight(I); // FBP: c_I
Factor Fprod( factor(I) );
Prob &prod = Fprod.p();
if( props.logdomain ) {
prod.takeLog();
- prod /= scale; // FBP
+ prod /= c_I; // FBP
} else
- prod ^= (1/scale); // FBP
+ prod ^= (1.0 / c_I); // FBP
foreach( const Neighbor &j, nbF(I) ) {
// prod_j will be the product of messages coming into j
} else {
// FBP: multiply by m_Ij^(1-1/c_I)
if( props.logdomain )
- prod_j += newMessage( j, J.iter)*(1-1/scale);
+ prod_j += newMessage( j, J.iter) * (1.0 - 1.0 / c_I);
else
- prod_j *= newMessage( j, J.iter)^(1-1/scale);
+ prod_j *= newMessage( j, J.iter) ^ (1.0 - 1.0 / c_I);
}
// multiply prod with prod_j
void FBP::construct() {
BP::construct();
- _scale_factor.resize( nrFactors(), 1.0 );
+ _weight.resize( nrFactors(), 1.0 );
}
Real TRWBP::logZ() const {
Real sum = 0.0;
for( size_t I = 0; I < nrFactors(); I++ ) {
- sum += (beliefF(I) * factor(I).log(true)).sum(); // TRWBP
- if( factor(I).vars().size() == 2 )
- sum -= edgeWeight(I) * MutualInfo( beliefF(I) ); // TRWBP
+ sum += (beliefF(I) * factor(I).log(true)).sum(); // TRWBP/FBP
+ sum += Weight(I) * beliefF(I).entropy(); // TRWBP/FBP
+ }
+ for( size_t i = 0; i < nrVars(); ++i ) {
+ Real c_i = 0.0;
+ foreach( const Neighbor &I, nbV(i) )
+ c_i += Weight(I);
+ sum += (1.0 - c_i) * beliefV(i).entropy(); // TRWBP/FBP
}
- for( size_t i = 0; i < nrVars(); ++i )
- sum += beliefV(i).entropy(); // TRWBP
return sum;
}
// calculate updated message I->i
size_t I = nbV(i,_I);
const Var &v_i = var(i);
- const VarSet &v_I = factor(I).vars();
- Real c_I = edgeWeight(I); // TRWBP: c_I (\mu_I in the paper)
+ Real c_I = Weight(I); // TRWBP: c_I (\mu_I in the paper)
Prob marg;
- if( v_I.size() == 1 ) { // optimization
+ if( factor(I).vars().size() == 1 ) { // optimization
marg = factor(I).p();
- } else
+ } else {
Factor Fprod( factor(I) );
Prob &prod = Fprod.p();
if( props.logdomain ) {
// prod_j will be the product of messages coming into j
Prob prod_j( v_j.states(), props.logdomain ? 0.0 : 1.0 );
foreach( const Neighbor &J, nbV(j) ) {
- Real c_J = edgeWeight(J);
+ Real c_J = Weight(J);
if( J != I ) { // for all J in nb(j) \ I
if( props.logdomain )
prod_j += message( j, J.iter ) * c_J;
Fprod *= Factor( v_j, prod_j );
} else {
/* OPTIMIZED VERSION */
+ size_t _I = j.dual;
// ind is the precalculated IndexFor(j,I) i.e. to x_I == k corresponds x_j == ind[k]
const ind_t &ind = index(j, _I);
for( size_t r = 0; r < prod.size(); ++r )
marg[ind[r]] = prod[r];
marg.normalize();
}
+ }
- // Store result
- if( props.logdomain )
- newMessage(i,_I) = marg.log();
- else
- newMessage(i,_I) = marg;
+ // Store result
+ if( props.logdomain )
+ newMessage(i,_I) = marg.log();
+ else
+ newMessage(i,_I) = marg;
- // Update the residual if necessary
- if( props.updates == Properties::UpdateType::SEQMAX )
- updateResidual( i, _I , dist( newMessage( i, _I ), message( i, _I ), Prob::DISTLINF ) );
- }
+ // Update the residual if necessary
+ if( props.updates == Properties::UpdateType::SEQMAX )
+ updateResidual( i, _I , dist( newMessage( i, _I ), message( i, _I ), Prob::DISTLINF ) );
}
void TRWBP::calcBeliefV( size_t i, Prob &p ) const {
p = Prob( var(i).states(), props.logdomain ? 0.0 : 1.0 );
foreach( const Neighbor &I, nbV(i) ) {
- Real c_I = edgeWeight(I);
+ Real c_I = Weight(I);
if( props.logdomain )
p += newMessage( i, I.iter ) * c_I;
else
/* This code has been copied from bp.cpp, except where comments indicate TRWBP-specific behaviour */
void TRWBP::calcBeliefF( size_t I, Prob &p ) const {
- Real c_I = edgeWeight(I); // TRWBP: c_I
- const VarSet &v_I = factor(I).vars();
+ Real c_I = Weight(I); // TRWBP: c_I
Factor Fprod( factor(I) );
Prob &prod = Fprod.p();
// prod_j will be the product of messages coming into j
Prob prod_j( v_j.states(), props.logdomain ? 0.0 : 1.0 );
foreach( const Neighbor &J, nbV(j) ) {
- Real c_J = edgeWeight(J);
+ Real c_J = Weight(J);
if( J != I ) { // for all J in nb(j) \ I
if( props.logdomain )
prod_j += newMessage( j, J.iter ) * c_J;
void TRWBP::construct() {
BP::construct();
- _edge_weight.resize( nrFactors(), 1.0 );
+ _weight.resize( nrFactors(), 1.0 );
}
#!/bin/bash
-./testdai --report-iters false --report-time false --marginals VAR --aliases aliases.conf --filename $1 --methods EXACT JTREE_HUGIN JTREE_SHSH BP_SEQFIX BP_SEQRND BP_SEQMAX BP_PARALL BP_SEQFIX_LOG BP_SEQRND_LOG BP_SEQMAX_LOG BP_PARALL_LOG MF_SEQRND TREEEP TREEEPWC GBP_MIN GBP_BETHE GBP_DELTA GBP_LOOP3 GBP_LOOP4 GBP_LOOP5 GBP_LOOP6 GBP_LOOP7 HAK_MIN HAK_BETHE HAK_DELTA HAK_LOOP3 HAK_LOOP4 HAK_LOOP5 MR_RESPPROP_FULL MR_CLAMPING_FULL MR_EXACT_FULL MR_RESPPROP_LINEAR MR_CLAMPING_LINEAR MR_EXACT_LINEAR LCBP_FULLCAV_SEQFIX LCBP_FULLCAVin_SEQFIX LCBP_FULLCAV_SEQRND LCBP_FULLCAVin_SEQRND LCBP_FULLCAV_NONE LCBP_FULLCAVin_NONE LCBP_PAIRCAV_SEQFIX LCBP_PAIRCAVin_SEQFIX LCBP_PAIRCAV_SEQRND LCBP_PAIRCAVin_SEQRND LCBP_PAIRCAV_NONE LCBP_PAIRCAVin_NONE LCBP_PAIR2CAV_SEQFIX LCBP_PAIR2CAVin_SEQFIX LCBP_PAIR2CAV_SEQRND LCBP_PAIR2CAVin_SEQRND LCBP_PAIR2CAV_NONE LCBP_PAIR2CAVin_NONE LCBP_UNICAV_SEQFIX LCBP_UNICAV_SEQRND BBP CBP
+./testdai --report-iters false --report-time false --marginals VAR --aliases aliases.conf --filename $1 --methods EXACT JTREE_HUGIN JTREE_SHSH BP_SEQFIX BP_SEQRND BP_SEQMAX BP_PARALL BP_SEQFIX_LOG BP_SEQRND_LOG BP_SEQMAX_LOG BP_PARALL_LOG FBP TRWBP MF_SEQRND TREEEP TREEEPWC GBP_MIN GBP_BETHE GBP_DELTA GBP_LOOP3 GBP_LOOP4 GBP_LOOP5 GBP_LOOP6 GBP_LOOP7 HAK_MIN HAK_BETHE HAK_DELTA HAK_LOOP3 HAK_LOOP4 HAK_LOOP5 MR_RESPPROP_FULL MR_CLAMPING_FULL MR_EXACT_FULL MR_RESPPROP_LINEAR MR_CLAMPING_LINEAR MR_EXACT_LINEAR LCBP_FULLCAV_SEQFIX LCBP_FULLCAVin_SEQFIX LCBP_FULLCAV_SEQRND LCBP_FULLCAVin_SEQRND LCBP_FULLCAV_NONE LCBP_FULLCAVin_NONE LCBP_PAIRCAV_SEQFIX LCBP_PAIRCAVin_SEQFIX LCBP_PAIRCAV_SEQRND LCBP_PAIRCAVin_SEQRND LCBP_PAIRCAV_NONE LCBP_PAIRCAVin_NONE LCBP_PAIR2CAV_SEQFIX LCBP_PAIR2CAVin_SEQFIX LCBP_PAIR2CAV_SEQRND LCBP_PAIR2CAVin_SEQRND LCBP_PAIR2CAV_NONE LCBP_PAIR2CAVin_NONE LCBP_UNICAV_SEQFIX LCBP_UNICAV_SEQRND BBP CBP
-@testdai --report-iters false --report-time false --marginals VAR --aliases aliases.conf --filename %1 --methods EXACT JTREE_HUGIN JTREE_SHSH BP_SEQFIX BP_SEQRND BP_SEQMAX BP_PARALL BP_SEQFIX_LOG BP_SEQRND_LOG BP_SEQMAX_LOG BP_PARALL_LOG MF_SEQRND TREEEP TREEEPWC GBP_MIN GBP_BETHE GBP_DELTA GBP_LOOP3 GBP_LOOP4 GBP_LOOP5 GBP_LOOP6 GBP_LOOP7 HAK_MIN HAK_BETHE HAK_DELTA HAK_LOOP3 HAK_LOOP4 HAK_LOOP5 MR_RESPPROP_FULL MR_CLAMPING_FULL MR_EXACT_FULL MR_RESPPROP_LINEAR MR_CLAMPING_LINEAR MR_EXACT_LINEAR LCBP_FULLCAV_SEQFIX LCBP_FULLCAVin_SEQFIX LCBP_FULLCAV_SEQRND LCBP_FULLCAVin_SEQRND LCBP_FULLCAV_NONE LCBP_FULLCAVin_NONE LCBP_PAIRCAV_SEQFIX LCBP_PAIRCAVin_SEQFIX LCBP_PAIRCAV_SEQRND LCBP_PAIRCAVin_SEQRND LCBP_PAIRCAV_NONE LCBP_PAIRCAVin_NONE LCBP_PAIR2CAV_SEQFIX LCBP_PAIR2CAVin_SEQFIX LCBP_PAIR2CAV_SEQRND LCBP_PAIR2CAVin_SEQRND LCBP_PAIR2CAV_NONE LCBP_PAIR2CAVin_NONE LCBP_UNICAV_SEQFIX LCBP_UNICAV_SEQRND BBP CBP
+@testdai --report-iters false --report-time false --marginals VAR --aliases aliases.conf --filename %1 --methods EXACT JTREE_HUGIN JTREE_SHSH BP_SEQFIX BP_SEQRND BP_SEQMAX BP_PARALL BP_SEQFIX_LOG BP_SEQRND_LOG BP_SEQMAX_LOG BP_PARALL_LOG FBP TRWBP MF_SEQRND TREEEP TREEEPWC GBP_MIN GBP_BETHE GBP_DELTA GBP_LOOP3 GBP_LOOP4 GBP_LOOP5 GBP_LOOP6 GBP_LOOP7 HAK_MIN HAK_BETHE HAK_DELTA HAK_LOOP3 HAK_LOOP4 HAK_LOOP5 MR_RESPPROP_FULL MR_CLAMPING_FULL MR_EXACT_FULL MR_RESPPROP_LINEAR MR_CLAMPING_LINEAR MR_EXACT_LINEAR LCBP_FULLCAV_SEQFIX LCBP_FULLCAVin_SEQFIX LCBP_FULLCAV_SEQRND LCBP_FULLCAVin_SEQRND LCBP_FULLCAV_NONE LCBP_FULLCAVin_NONE LCBP_PAIRCAV_SEQFIX LCBP_PAIRCAVin_SEQFIX LCBP_PAIRCAV_SEQRND LCBP_PAIRCAVin_SEQRND LCBP_PAIRCAV_NONE LCBP_PAIRCAVin_NONE LCBP_PAIR2CAV_SEQFIX LCBP_PAIR2CAVin_SEQFIX LCBP_PAIR2CAV_SEQRND LCBP_PAIR2CAVin_SEQRND LCBP_PAIR2CAV_NONE LCBP_PAIR2CAVin_NONE LCBP_UNICAV_SEQFIX LCBP_UNICAV_SEQRND BBP CBP
# ({x13}, (5.266e-01, 4.734e-01))
# ({x14}, (6.033e-01, 3.967e-01))
# ({x15}, (1.558e-01, 8.442e-01))
+FBP 9.483e-02 3.078e-02 +1.737e-02 1.000e-09
+# ({x0}, (4.233e-01, 5.767e-01))
+# ({x1}, (5.422e-01, 4.578e-01))
+# ({x2}, (4.662e-01, 5.338e-01))
+# ({x3}, (5.424e-01, 4.576e-01))
+# ({x4}, (6.042e-01, 3.958e-01))
+# ({x5}, (1.845e-01, 8.155e-01))
+# ({x6}, (8.203e-01, 1.797e-01))
+# ({x7}, (2.292e-01, 7.708e-01))
+# ({x8}, (3.119e-01, 6.881e-01))
+# ({x9}, (2.975e-01, 7.025e-01))
+# ({x10}, (7.268e-01, 2.732e-01))
+# ({x11}, (1.485e-01, 8.515e-01))
+# ({x12}, (4.512e-01, 5.488e-01))
+# ({x13}, (5.266e-01, 4.734e-01))
+# ({x14}, (6.033e-01, 3.967e-01))
+# ({x15}, (1.558e-01, 8.442e-01))
+TRWBP 9.483e-02 3.078e-02 +1.737e-02 1.000e-09
+# ({x0}, (4.233e-01, 5.767e-01))
+# ({x1}, (5.422e-01, 4.578e-01))
+# ({x2}, (4.662e-01, 5.338e-01))
+# ({x3}, (5.424e-01, 4.576e-01))
+# ({x4}, (6.042e-01, 3.958e-01))
+# ({x5}, (1.845e-01, 8.155e-01))
+# ({x6}, (8.203e-01, 1.797e-01))
+# ({x7}, (2.292e-01, 7.708e-01))
+# ({x8}, (3.119e-01, 6.881e-01))
+# ({x9}, (2.975e-01, 7.025e-01))
+# ({x10}, (7.268e-01, 2.732e-01))
+# ({x11}, (1.485e-01, 8.515e-01))
+# ({x12}, (4.512e-01, 5.488e-01))
+# ({x13}, (5.266e-01, 4.734e-01))
+# ({x14}, (6.033e-01, 3.967e-01))
+# ({x15}, (1.558e-01, 8.442e-01))
MF_SEQRND 3.607e-01 1.904e-01 -9.409e-02 1.000e-09
# ({x0}, (2.053e-01, 7.947e-01))
# ({x1}, (9.163e-01, 8.373e-02))