/// \file
/// \brief Defines class CBP [\ref EaG09]
/// \todo Improve documentation
-/// \todo Clean up
#ifndef __defined_libdai_cbp_h
#include <fstream>
-
#include <boost/shared_ptr.hpp>
#include <dai/daialg.h>
-
#include <dai/cbp.h>
#include <dai/bbp.h>
/// Find a variable to clamp using BBP (chooses the variable and state with the largest adjoint)
/// \see BBP
-std::pair<size_t, size_t> bbpFindClampVar(const InfAlg &in_bp, bool clampingVar,
- const PropertySet &bbp_props, bbp_cfn_t cfn,
- Real *maxVarOut);
+std::pair<size_t, size_t> bbpFindClampVar( const InfAlg &in_bp, bool clampingVar, const PropertySet &bbp_props, bbp_cfn_t cfn, Real *maxVarOut );
/// Class for CBP (Clamped Belief Propagation)
/** This algorithm uses configurable heuristics to choose a variable
* x_i and a state x_i*. Inference is done with x_i "clamped" to x_i*
- * (e.g. conditional on x_i==x_i*), and also with the negation of this
+ * (i.e., conditional on x_i == x_i*), and also with the negation of this
* condition. Clamping is done recursively up to a fixed number of
* levels (other stopping criteria are also implemented, see
- * "recursion" property). The resulting approximate marginals are
+ * \a recursion property). The resulting approximate marginals are
* combined using logZ estimates.
*/
class CBP : public DAIAlgFG {
private:
+ /// Variable beliefs
std::vector<Factor> _beliefsV;
+ /// Factor beliefs
std::vector<Factor> _beliefsF;
+        /// Logarithm of the partition sum
double _logZ;
- double _est_logZ;
- double _old_est_logZ;
+        /// Sum over all leaf nodes of the number of clampings (i.e., the recursion level) at that leaf
double _sum_level;
+
+ /// Number of leaves of recursion tree
size_t _num_leaves;
+ /// Output stream where information about the clampings is written
boost::shared_ptr<std::ofstream> _clamp_ofstream;
- bbp_cfn_t BBP_cost_function() {
- return props.bbp_cfn;
- }
+ /// Returns BBP cost function used
+ bbp_cfn_t BBP_cost_function() { return props.bbp_cfn; }
+        /// Prints beliefs, variables and partition sum, in case of a debug build
void printDebugInfo();
/// Called by 'run', and by itself. Implements the main algorithm.
* beliefs estimates of the children, and returns the improved
* estimates in \a lz_out and \a beliefs_out to its parent
*/
- void runRecurse(InfAlg *bp,
- double orig_logZ,
- std::vector<size_t> clamped_vars_list,
- size_t &num_leaves,
- size_t &choose_count,
- double &sum_level,
- Real &lz_out,
- std::vector<Factor>& beliefs_out);
+ void runRecurse( InfAlg *bp, double orig_logZ, std::vector<size_t> clamped_vars_list, size_t &num_leaves,
+ size_t &choose_count, double &sum_level, Real &lz_out, std::vector<Factor> &beliefs_out );
/// Choose the next variable to clamp
/** Choose the next variable to clamp, given a converged InfAlg (\a bp),
* props.choose==CHOOSE_BBP then it is used to store the
* adjoint of the chosen variable
*/
- virtual bool chooseNextClampVar(InfAlg* bp,
- std::vector<size_t> &clamped_vars_list,
- size_t &i, std::vector<size_t> &xis, Real *maxVarOut);
+ virtual bool chooseNextClampVar( InfAlg* bp, std::vector<size_t> &clamped_vars_list, size_t &i, std::vector<size_t> &xis, Real *maxVarOut );
/// Return the InfAlg to use at each step of the recursion.
/// \todo At present, only returns a BP instance
InfAlg* getInfAlg();
+        /// Number of iterations needed
size_t _iters;
+ /// Maximum difference encountered so far
double _maxdiff;
- void setBeliefs(const std::vector<Factor>& bs, double logZ) {
- size_t i=0;
- _beliefsV.clear(); _beliefsV.reserve(nrVars());
- _beliefsF.clear(); _beliefsF.reserve(nrFactors());
- for(i=0; i<nrVars(); i++) _beliefsV.push_back(bs[i]);
- for(; i<nrVars()+nrFactors(); i++) _beliefsF.push_back(bs[i]);
- _logZ = logZ;
- }
+ /// Sets variable beliefs, factor beliefs and logZ
+ /** \param bs should be a concatenation of the variable beliefs followed by the factor beliefs
+ */
+ void setBeliefs( const std::vector<Factor> &bs, double logZ );
+ /// Constructor helper function
void construct();
public:
-
- //----------------------------------------------------------------
-
- /// construct CBP object from FactorGraph
- CBP(const FactorGraph &fg, const PropertySet &opts) : DAIAlgFG(fg) {
- props.set(opts);
+ /// Construct CBP object from FactorGraph fg and PropertySet opts
+ CBP( const FactorGraph &fg, const PropertySet &opts ) : DAIAlgFG(fg) {
+ props.set( opts );
construct();
}
/// @name General InfAlg interface
//@{
virtual CBP* clone() const { return new CBP(*this); }
- virtual CBP* create() const { DAI_THROW(NOT_IMPLEMENTED); }
virtual std::string identify() const { return std::string(Name) + props.toString(); }
virtual Factor belief (const Var &n) const { return _beliefsV[findVar(n)]; }
virtual Factor belief (const VarSet &) const { DAI_THROW(NOT_IMPLEMENTED); }
virtual size_t Iterations() const { return _iters; }
//@}
- Factor beliefV (size_t i) const { return _beliefsV[i]; }
- Factor beliefF (size_t I) const { return _beliefsF[I]; }
+ Factor beliefV( size_t i ) const { return _beliefsV[i]; }
+ Factor beliefF( size_t I ) const { return _beliefsF[I]; }
//----------------------------------------------------------------
double rec_tol;
/// Maximum number of levels of recursion (\a recurse is REC_FIXED)
size_t max_levels = 10;
- /// If choose=CHOOSE_BBP and maximum adjoint is less than this value, don't recurse
+ /// If choose==CHOOSE_BBP and maximum adjoint is less than this value, don't recurse
double min_max_adj;
/// Heuristic for choosing clamping variable
ChooseMethodType choose;
} props;
/* }}} END OF GENERATED CODE */
- /// Returns heuristic used for clamping variable
- Properties::ChooseMethodType ChooseMethod() { return props.choose; }
-
- /// Returns method used for deciding when to stop recursing
- Properties::RecurseType Recursion() { return props.recursion; }
-
- /// Returns clamping type used
- Properties::ClampType Clamping() { return props.clamp; }
- /// Returns maximum number of levels of recursion
- size_t maxClampLevel() { return props.max_levels; }
- /// Returns props.min_max_adj @see CBP::Properties::min_max_adj
- double minMaxAdj() { return props.min_max_adj; }
- /// Returns tolerance used for controlling recursion depth
- double recTol() { return props.rec_tol; }
+ /// Returns heuristic used for clamping variable
+ Properties::ChooseMethodType ChooseMethod() { return props.choose; }
+ /// Returns method used for deciding when to stop recursing
+ Properties::RecurseType Recursion() { return props.recursion; }
+ /// Returns clamping type used
+ Properties::ClampType Clamping() { return props.clamp; }
+ /// Returns maximum number of levels of recursion
+ size_t maxClampLevel() { return props.max_levels; }
+        /// Returns props.min_max_adj, \see CBP::Properties::min_max_adj
+ double minMaxAdj() { return props.min_max_adj; }
+ /// Returns tolerance used for controlling recursion depth
+ double recTol() { return props.rec_tol; }
};
+
/// Given a sorted vector of states \a xis and total state count \a n_states, return a vector of states not in \a xis
std::vector<size_t> complement( std::vector<size_t>& xis, size_t n_states );
const char *CBP::Name = "CBP";
+void CBP::setBeliefs( const std::vector<Factor> &bs, double logZ ) {
+ size_t i = 0;
+ _beliefsV.clear();
+ _beliefsV.reserve( nrVars() );
+ _beliefsF.clear();
+ _beliefsF.reserve( nrFactors() );
+ for( i = 0; i < nrVars(); i++ )
+ _beliefsV.push_back( bs[i] );
+ for( ; i < nrVars() + nrFactors(); i++ )
+ _beliefsF.push_back( bs[i] );
+ _logZ = logZ;
+}
+
+
void CBP::construct() {
- _beliefsV.clear(); _beliefsV.reserve(nrVars());
+ _beliefsV.clear();
+ _beliefsV.reserve(nrVars());
for( size_t i = 0; i < nrVars(); i++ )
_beliefsV.push_back( Factor(var(i)).normalized() );
- _beliefsF.clear(); _beliefsF.reserve(nrFactors());
+ _beliefsF.clear();
+ _beliefsF.reserve(nrFactors());
for( size_t I = 0; I < nrFactors(); I++ ) {
Factor f = factor(I);
f.fill(1); f.normalize();
- _beliefsF.push_back(f);
+ _beliefsF.push_back( f );
}
// to compute average level
_maxdiff = 0;
_iters = 0;
- if(props.clamp_outfile.length()>0) {
- _clamp_ofstream = shared_ptr<ofstream>(new ofstream(props.clamp_outfile.c_str(), ios_base::out|ios_base::trunc));
+ if( props.clamp_outfile.length() > 0 ) {
+ _clamp_ofstream = shared_ptr<ofstream>(new ofstream( props.clamp_outfile.c_str(), ios_base::out|ios_base::trunc ));
*_clamp_ofstream << "# COUNT LEVEL VAR STATE" << endl;
}
}
-static
-vector<Factor> mixBeliefs(Real p, vector<Factor> b, vector<Factor> c) {
- vector<Factor> out;
- assert(b.size()==c.size());
- out.reserve(b.size());
- Real pc = 1-p;
- for(size_t i=0; i<b.size(); i++) {
- // probably already normalized, but do it again just in case
- out.push_back(b[i].normalized()*p+
- c[i].normalized()*pc);
- }
- return out;
+/// Calculates a vector of mixtures p * b + (1-p) * c
+static vector<Factor> mixBeliefs( Real p, const vector<Factor> &b, const vector<Factor> &c ) {
+ vector<Factor> out;
+ assert( b.size() == c.size() );
+ out.reserve( b.size() );
+ Real pc = 1 - p;
+ for( size_t i = 0; i < b.size(); i++ )
+ // probably already normalized, but do it again just in case
+ out.push_back( b[i].normalized() * p + c[i].normalized() * pc );
+ return out;
}
double CBP::run() {
size_t seed = props.rand_seed;
- if(seed>0) rnd_seed(seed);
+ if( seed > 0)
+ rnd_seed( seed );
InfAlg *bp = getInfAlg();
bp->init();
vector<Factor> beliefs_out;
Real lz_out;
size_t choose_count=0;
- runRecurse(bp, bp->logZ(),
- vector<size_t>(0),
- _num_leaves, choose_count, _sum_level,
- lz_out, beliefs_out);
- if(props.verbose>=1) {
- cerr << "CBP average levels = " << (_sum_level/_num_leaves) << ", leaves = " << _num_leaves << endl;
- }
- setBeliefs(beliefs_out, lz_out);
- return 0;
+ runRecurse( bp, bp->logZ(), vector<size_t>(0), _num_leaves, choose_count, _sum_level, lz_out, beliefs_out );
+ if( props.verbose >= 1 )
+ cerr << "CBP average levels = " << (_sum_level / _num_leaves) << ", leaves = " << _num_leaves << endl;
+ setBeliefs( beliefs_out, lz_out );
+ return 0.0;
}
bpProps.Set("verbose", props.verbose);
bpProps.Set("logdomain", false);
bpProps.Set("damping", 0.0);
- BP *bp = new BP(*this,bpProps);
+ BP *bp = new BP( *this, bpProps );
bp->recordSentMessages = true;
bp->init();
return bp;
}
-void CBP::runRecurse(InfAlg *bp,
- double orig_logZ,
- vector<size_t> clamped_vars_list,
- size_t &num_leaves,
- size_t &choose_count,
- double &sum_level,
- Real &lz_out, vector<Factor>& beliefs_out) {
+void CBP::runRecurse( InfAlg *bp, double orig_logZ, vector<size_t> clamped_vars_list, size_t &num_leaves,
+ size_t &choose_count, double &sum_level, Real &lz_out, vector<Factor>& beliefs_out) {
// choose a variable/states to clamp:
size_t i;
vector<size_t> xis;
- Real maxVar=0.0;
+ Real maxVar = 0.0;
bool found;
- bool clampingVar = (Clamping()==Properties::ClampType::CLAMP_VAR);
+ bool clampingVar = (Clamping() == Properties::ClampType::CLAMP_VAR);
- if(Recursion()==Properties::RecurseType::REC_LOGZ && recTol()>0 &&
- exp(bp->logZ()-orig_logZ) < recTol()) {
+ if( Recursion() == Properties::RecurseType::REC_LOGZ && recTol() > 0 && exp( bp->logZ() - orig_logZ ) < recTol() )
found = false;
- } else {
- found = chooseNextClampVar(bp,
- clamped_vars_list,
- i, xis, &maxVar);
- }
+ else
+ found = chooseNextClampVar( bp, clamped_vars_list, i, xis, &maxVar );
- if(!found) {
+ if( !found ) {
num_leaves++;
sum_level += clamped_vars_list.size();
beliefs_out = bp->beliefs();
}
choose_count++;
- if(props.clamp_outfile.length()>0) {
+ if( props.clamp_outfile.length() > 0 )
*_clamp_ofstream << choose_count << "\t" << clamped_vars_list.size() << "\t" << i << "\t" << xis[0] << endl;
- }
- if(clampingVar) {
- foreach(size_t xi, xis) { assert(/*0<=xi &&*/ xi<var(i).states()); }
- } else {
- foreach(size_t xI, xis) { assert(/*0<=xI &&*/ xI<factor(i).states()); }
- }
+ if( clampingVar )
+ foreach( size_t xi, xis )
+ assert(/*0<=xi &&*/ xi < var(i).states() );
+ else
+ foreach( size_t xI, xis )
+ assert(/*0<=xI &&*/ xI < factor(i).states() );
    // - otherwise, clamp and recurse, saving marginal estimates for each
// clamp setting. afterwards, combine estimates.
// compute complement of 'xis'
- vector<size_t> cmp_xis=complement(xis, clampingVar?var(i).states():factor(i).states());
+ vector<size_t> cmp_xis = complement( xis, clampingVar ? var(i).states() : factor(i).states() );
- /// \todo could do this more efficiently with a nesting version of
- /// saveProbs/undoProbs
- Real lz; vector<Factor> b;
+ /// \todo could do this more efficiently with a nesting version of saveProbs/undoProbs
+ Real lz;
+ vector<Factor> b;
InfAlg *bp_c = bp->clone();
- if(clampingVar) {
- bp_c->fg().clampVar(i, xis);
- bp_c->init(var(i));
+ if( clampingVar ) {
+ bp_c->fg().clampVar( i, xis );
+ bp_c->init( var(i) );
} else {
- bp_c->fg().clampFactor(i, xis);
- bp_c->init(factor(i).vars());
+ bp_c->fg().clampFactor( i, xis );
+ bp_c->init( factor(i).vars() );
}
bp_c->run();
_iters += bp_c->Iterations();
lz = bp_c->logZ();
b = bp_c->beliefs();
- Real cmp_lz; vector<Factor> cmp_b;
+ Real cmp_lz;
+ vector<Factor> cmp_b;
InfAlg *cmp_bp_c = bp->clone();
- if(clampingVar) {
- cmp_bp_c->fg().clampVar(i,cmp_xis);
+ if( clampingVar ) {
+ cmp_bp_c->fg().clampVar( i, cmp_xis );
cmp_bp_c->init(var(i));
} else {
- cmp_bp_c->fg().clampFactor(i,cmp_xis);
- cmp_bp_c->init(factor(i).vars());
+ cmp_bp_c->fg().clampFactor( i, cmp_xis );
+ cmp_bp_c->init( factor(i).vars() );
}
cmp_bp_c->run();
_iters += cmp_bp_c->Iterations();
cmp_lz = cmp_bp_c->logZ();
cmp_b = cmp_bp_c->beliefs();
- double p = unSoftMax(lz, cmp_lz);
- Real bp__d=0.0;
+ double p = unSoftMax( lz, cmp_lz );
+ Real bp__d = 0.0;
- if(Recursion()==Properties::RecurseType::REC_BDIFF && recTol() > 0) {
- vector<Factor> combined_b(mixBeliefs(p,b,cmp_b));
- Real new_lz = logSumExp(lz,cmp_lz);
- bp__d = dist(bp->beliefs(),combined_b,nrVars());
- if(exp(new_lz-orig_logZ)*bp__d < recTol()) {
+ if( Recursion() == Properties::RecurseType::REC_BDIFF && recTol() > 0 ) {
+ vector<Factor> combined_b( mixBeliefs( p, b, cmp_b ) );
+ Real new_lz = logSumExp( lz,cmp_lz );
+ bp__d = dist( bp->beliefs(), combined_b, nrVars() );
+ if( exp( new_lz - orig_logZ) * bp__d < recTol() ) {
num_leaves++;
sum_level += clamped_vars_list.size();
beliefs_out = combined_b;
// either we are not doing REC_BDIFF or the distance was large
// enough to recurse:
- runRecurse(bp_c, orig_logZ,
- clamped_vars_list,
- num_leaves, choose_count, sum_level, lz, b);
- runRecurse(cmp_bp_c, orig_logZ,
- clamped_vars_list,
- num_leaves, choose_count, sum_level, cmp_lz, cmp_b);
+ runRecurse( bp_c, orig_logZ, clamped_vars_list, num_leaves, choose_count, sum_level, lz, b );
+ runRecurse( cmp_bp_c, orig_logZ, clamped_vars_list, num_leaves, choose_count, sum_level, cmp_lz, cmp_b );
- p = unSoftMax(lz,cmp_lz);
+ p = unSoftMax( lz, cmp_lz );
- beliefs_out = mixBeliefs(p, b, cmp_b);
- lz_out = logSumExp(lz,cmp_lz);
+ beliefs_out = mixBeliefs( p, b, cmp_b );
+ lz_out = logSumExp( lz, cmp_lz );
- if(props.verbose>=2) {
+ if( props.verbose >= 2 ) {
Real d = dist( bp->beliefs(), beliefs_out, nrVars() );
cerr << "Distance (clamping " << i << "): " << d;
- if(Recursion()==Properties::RecurseType::REC_BDIFF)
+ if( Recursion() == Properties::RecurseType::REC_BDIFF )
cerr << "; bp_dual predicted " << bp__d;
- fprintf(stderr, "; max_adjoint = %f; logZ = %f (in %f) (orig %f); p = %f; level = %zd\n",
- maxVar, lz_out, bp->logZ(), orig_logZ, p,
- clamped_vars_list.size());
+ cerr << "; max_adjoint = " << maxVar << "; logZ = " << lz_out << " (in " << bp->logZ() << ") (orig " << orig_logZ << "); p = " << p << "; level = " << clamped_vars_list.size() << endl;
}
- delete bp_c; delete cmp_bp_c;
+ delete bp_c;
+ delete cmp_bp_c;
}
// 'xis' must be sorted
-bool CBP::chooseNextClampVar(InfAlg* bp,
- vector<size_t> &clamped_vars_list,
- size_t &i, vector<size_t> &xis, Real *maxVarOut) {
- Real tiny=1.0e-14;
- if(props.verbose>=3) {
+bool CBP::chooseNextClampVar( InfAlg *bp, vector<size_t> &clamped_vars_list, size_t &i, vector<size_t> &xis, Real *maxVarOut ) {
+ Real tiny = 1.0e-14;
+ if( props.verbose >= 3 )
cerr << "clamped_vars_list" << clamped_vars_list << endl;
- }
- if(clamped_vars_list.size() >= maxClampLevel()) {
+ if( clamped_vars_list.size() >= maxClampLevel() )
return false;
- }
- if(ChooseMethod()==Properties::ChooseMethodType::CHOOSE_RANDOM) {
- if(Clamping()==Properties::ClampType::CLAMP_VAR) {
- int t=0, t1=100;
+ if( ChooseMethod() == Properties::ChooseMethodType::CHOOSE_RANDOM ) {
+ if( Clamping() == Properties::ClampType::CLAMP_VAR ) {
+ int t = 0, t1 = 100;
do {
- i = rnd(nrVars());
+ i = rnd( nrVars() );
t++;
- } while(abs(bp->beliefV(i).p().max()-1) < tiny &&
- t < t1);
- if(t==t1) {
+ } while( abs( bp->beliefV(i).p().max() - 1) < tiny && t < t1 );
+ if( t == t1 ) {
return false;
- // die("Too many levels requested in CBP");
+ // die("Too many levels requested in CBP");
}
// only pick probable values for variable
size_t xi;
do {
- xi = rnd(var(i).states());
+ xi = rnd( var(i).states() );
t++;
- } while(bp->beliefV(i).p()[xi] < tiny && t<t1);
- assert(t<t1);
- xis.resize(1, xi);
+ } while( bp->beliefV(i).p()[xi] < tiny && t < t1 );
+ assert( t < t1 );
+ xis.resize( 1, xi );
// assert(!_clamped_vars.count(i)); // not true for >2-ary variables
- DAI_IFVERB(2, "CHOOSE_RANDOM at level " << clamped_vars_list.size()
- << " chose variable " << i << " state " << xis[0] << endl);
+ DAI_IFVERB(2, "CHOOSE_RANDOM at level " << clamped_vars_list.size() << " chose variable " << i << " state " << xis[0] << endl);
} else {
- int t=0, t1=100;
+ int t = 0, t1 = 100;
do {
- i = rnd(nrFactors());
+ i = rnd( nrFactors() );
t++;
- } while(abs(bp->beliefF(i).p().max()-1) < tiny &&
- t < t1);
- if(t==t1) {
+ } while( abs( bp->beliefF(i).p().max() - 1) < tiny && t < t1 );
+ if( t == t1 )
return false;
- // die("Too many levels requested in CBP");
- }
+ // die("Too many levels requested in CBP");
// only pick probable values for variable
size_t xi;
do {
- xi = rnd(factor(i).states());
+ xi = rnd( factor(i).states() );
t++;
- } while(bp->beliefF(i).p()[xi] < tiny && t<t1);
- assert(t<t1);
- xis.resize(1, xi);
+ } while( bp->beliefF(i).p()[xi] < tiny && t < t1 );
+ assert( t < t1 );
+ xis.resize( 1, xi );
// assert(!_clamped_vars.count(i)); // not true for >2-ary variables
DAI_IFVERB(2, endl<<"CHOOSE_RANDOM chose factor "<<i<<" state "<<xis[0]<<endl);
}
- } else if(ChooseMethod()==Properties::ChooseMethodType::CHOOSE_MAXENT) {
- if(Clamping()==Properties::ClampType::CLAMP_VAR) {
- Real max_ent=-1.0;
- int win_k=-1, win_xk=-1;
- for(size_t k=0; k<nrVars(); k++) {
+ } else if( ChooseMethod() == Properties::ChooseMethodType::CHOOSE_MAXENT ) {
+ if( Clamping() == Properties::ClampType::CLAMP_VAR ) {
+ Real max_ent = -1.0;
+ int win_k = -1, win_xk = -1;
+ for( size_t k = 0; k < nrVars(); k++ ) {
Real ent=bp->beliefV(k).entropy();
- if(max_ent < ent) {
+ if( max_ent < ent ) {
max_ent = ent;
win_k = k;
win_xk = bp->beliefV(k).p().argmax().first;
}
}
- assert(win_k>=0); assert(win_xk>=0);
- i = win_k; xis.resize(1, win_xk);
+ assert( win_k >= 0 );
+ assert( win_xk >= 0 );
+ i = win_k;
+ xis.resize( 1, win_xk );
DAI_IFVERB(2, endl<<"CHOOSE_MAXENT chose variable "<<i<<" state "<<xis[0]<<endl);
- if(bp->beliefV(i).p()[xis[0]] < tiny) {
+ if( bp->beliefV(i).p()[xis[0]] < tiny ) {
DAI_IFVERB(2, "Warning: CHOOSE_MAXENT found unlikely state, not recursing");
return false;
}
} else {
- Real max_ent=-1.0;
- int win_k=-1, win_xk=-1;
- for(size_t k=0; k<nrFactors(); k++) {
- Real ent=bp->beliefF(k).entropy();
- if(max_ent < ent) {
+ Real max_ent = -1.0;
+ int win_k = -1, win_xk = -1;
+ for( size_t k = 0; k < nrFactors(); k++ ) {
+ Real ent = bp->beliefF(k).entropy();
+ if( max_ent < ent ) {
max_ent = ent;
win_k = k;
win_xk = bp->beliefF(k).p().argmax().first;
}
}
- assert(win_k>=0); assert(win_xk>=0);
- i = win_k; xis.resize(1, win_xk);
+ assert( win_k >= 0 );
+ assert( win_xk >= 0 );
+ i = win_k;
+ xis.resize( 1, win_xk );
DAI_IFVERB(2, endl<<"CHOOSE_MAXENT chose factor "<<i<<" state "<<xis[0]<<endl);
- if(bp->beliefF(i).p()[xis[0]] < tiny) {
+ if( bp->beliefF(i).p()[xis[0]] < tiny ) {
DAI_IFVERB(2, "Warning: CHOOSE_MAXENT found unlikely state, not recursing");
return false;
}
}
- } else if(ChooseMethod()==Properties::ChooseMethodType::CHOOSE_BP_L1 ||
- ChooseMethod()==Properties::ChooseMethodType::CHOOSE_BP_CFN) {
- bool doL1=(ChooseMethod()==Properties::ChooseMethodType::CHOOSE_BP_L1);
+ } else if( ChooseMethod()==Properties::ChooseMethodType::CHOOSE_BP_L1 ||
+ ChooseMethod()==Properties::ChooseMethodType::CHOOSE_BP_CFN ) {
+ bool doL1 = (ChooseMethod() == Properties::ChooseMethodType::CHOOSE_BP_L1);
vector<size_t> state;
- if(!doL1 && needGibbsState(BBP_cost_function())) {
- state = getGibbsState(*bp,2*bp->Iterations());
- }
+ if( !doL1 && needGibbsState( BBP_cost_function() ) )
+ state = getGibbsState( *bp, 2*bp->Iterations() );
// try clamping each variable manually
- assert(Clamping()==Properties::ClampType::CLAMP_VAR);
- Real max_cost=0.0;
- int win_k=-1, win_xk=-1;
- for(size_t k=0; k<nrVars(); k++) {
- for(size_t xk=0; xk<var(k).states(); xk++) {
- if(bp->beliefV(k)[xk]<tiny) continue;
+ assert( Clamping() == Properties::ClampType::CLAMP_VAR );
+ Real max_cost = 0.0;
+ int win_k = -1, win_xk = -1;
+ for( size_t k = 0; k < nrVars(); k++ ) {
+ for( size_t xk = 0; xk < var(k).states(); xk++ ) {
+ if( bp->beliefV(k)[xk] < tiny )
+ continue;
InfAlg *bp1 = bp->clone();
- bp1->clamp(var(k), xk);
- bp1->init(var(k));
+ bp1->clamp( var(k), xk );
+ bp1->init( var(k) );
bp1->run();
- Real cost=0;
- if(doL1) {
- for(size_t j=0; j<nrVars(); j++) {
- cost += dist(bp->beliefV(j), bp1->beliefV(j), Prob::DISTL1);
- }
- } else {
- cost = getCostFn(*bp1, BBP_cost_function(), &state);
- }
- if(cost>max_cost || win_k==-1) {
- max_cost=cost; win_k=k; win_xk=xk;
+ Real cost = 0;
+ if( doL1 )
+ for( size_t j = 0; j < nrVars(); j++ )
+ cost += dist( bp->beliefV(j), bp1->beliefV(j), Prob::DISTL1 );
+ else
+ cost = getCostFn( *bp1, BBP_cost_function(), &state );
+ if( cost > max_cost || win_k == -1 ) {
+ max_cost = cost;
+ win_k = k;
+ win_xk = xk;
}
delete bp1;
}
}
- assert(win_k>=0); assert(win_xk>=0);
- i = win_k; xis.resize(1, win_xk);
- } else if(ChooseMethod()==Properties::ChooseMethodType::CHOOSE_BBP) {
- Real mvo; if(!maxVarOut) maxVarOut = &mvo;
- bool clampingVar = (Clamping()==Properties::ClampType::CLAMP_VAR);
- pair<size_t, size_t> cv =
- bbpFindClampVar(*bp,
- clampingVar,
- props.bbp_props,
- BBP_cost_function(),&mvo);
+ assert( win_k >= 0 );
+ assert( win_xk >= 0 );
+ i = win_k;
+ xis.resize( 1, win_xk );
+ } else if( ChooseMethod() == Properties::ChooseMethodType::CHOOSE_BBP ) {
+ Real mvo;
+ if( !maxVarOut )
+ maxVarOut = &mvo;
+ bool clampingVar = (Clamping() == Properties::ClampType::CLAMP_VAR);
+ pair<size_t, size_t> cv = bbpFindClampVar( *bp, clampingVar, props.bbp_props, BBP_cost_function(), &mvo );
// if slope isn't big enough then don't clamp
- if(mvo < minMaxAdj()) return false;
+ if( mvo < minMaxAdj() )
+ return false;
- size_t xi=cv.second;
+ size_t xi = cv.second;
i = cv.first;
#define VAR_INFO (clampingVar?"variable ":"factor ") \
<< i << " state " << xi \
<< " (p=" << (clampingVar?bp->beliefV(i)[xi]:bp->beliefF(i)[xi]) \
<< ", entropy = " << (clampingVar?bp->beliefV(i):bp->beliefF(i)).entropy() \
<< ", maxVar = "<< mvo << ")"
- Prob b = (clampingVar?bp->beliefV(i).p():bp->beliefF(i).p());
- if(b[xi] < tiny) {
- cerr << "Warning, at level "
- << clamped_vars_list.size()
- << ", bbpFindClampVar found unlikely "
- << VAR_INFO << endl;
+ Prob b = ( clampingVar ? bp->beliefV(i).p() : bp->beliefF(i).p());
+ if( b[xi] < tiny ) {
+ cerr << "Warning, at level " << clamped_vars_list.size() << ", bbpFindClampVar found unlikely " << VAR_INFO << endl;
return false;
}
- if(abs(b[xi]-1) < tiny) {
- cerr << "Warning at level "
- << clamped_vars_list.size()
- << ", bbpFindClampVar found overly likely "
- << VAR_INFO << endl;
+ if( abs(b[xi] - 1) < tiny ) {
+ cerr << "Warning at level " << clamped_vars_list.size() << ", bbpFindClampVar found overly likely " << VAR_INFO << endl;
return false;
}
- xis.resize(1,xi);
- if(clampingVar) {
- assert(/*0<=xi &&*/ xi<var(i).states());
- } else {
- assert(/*0<=xi &&*/ xi<factor(i).states());
- }
- DAI_IFVERB(2, "CHOOSE_BBP (num clamped = " << clamped_vars_list.size()
- << ") chose " << i << " state " << xi << endl);
- } else {
- abort();
- }
- clamped_vars_list.push_back(i);
+ xis.resize( 1, xi );
+ if( clampingVar )
+ assert(/*0<=xi &&*/ xi < var(i).states() );
+ else
+ assert(/*0<=xi &&*/ xi < factor(i).states() );
+ DAI_IFVERB(2, "CHOOSE_BBP (num clamped = " << clamped_vars_list.size() << ") chose " << i << " state " << xi << endl);
+ } else
+ DAI_THROW(INTERNAL_ERROR);
+ clamped_vars_list.push_back( i );
return true;
}
//----------------------------------------------------------------
-/** function which takes a factor graph as input, runs Gibbs and BP_dual,
+/** Function which takes a factor graph as input, runs Gibbs and BP_dual,
* creates and runs a BBP object, finds best variable, returns
* (variable,state) pair for clamping
*/
-pair<size_t, size_t> bbpFindClampVar(const InfAlg &in_bp, bool clampingVar,
- const PropertySet &bbp_props, bbp_cfn_t cfn,
- Real *maxVarOut)
-{
- BBP bbp(&in_bp,bbp_props);
- initBBPCostFnAdj(bbp, in_bp, cfn, NULL);
+pair<size_t, size_t> bbpFindClampVar( const InfAlg &in_bp, bool clampingVar, const PropertySet &bbp_props, bbp_cfn_t cfn, Real *maxVarOut ) {
+ BBP bbp( &in_bp, bbp_props );
+ initBBPCostFnAdj( bbp, in_bp, cfn, NULL );
bbp.run();
// find and return the (variable,state) with the largest adj_psi_V
- size_t argmax_var=0;
- size_t argmax_var_state=0;
- Real max_var=0;
- if(clampingVar) {
- for(size_t i=0; i<in_bp.fg().nrVars(); i++) {
+ size_t argmax_var = 0;
+ size_t argmax_var_state = 0;
+ Real max_var = 0;
+ if( clampingVar ) {
+ for( size_t i = 0; i < in_bp.fg().nrVars(); i++ ) {
Prob adj_psi_V = bbp.adj_psi_V(i);
if(0) {
// helps to account for amount of movement possible in variable
// i's beliefs? seems not..
adj_psi_V *= in_bp.beliefV(i).entropy();
}
- if(0) {
+ if(0){
// adj_psi_V *= Prob(in_bp.fg().var(i).states(),1.0)-in_bp.beliefV(i).p();
adj_psi_V *= in_bp.beliefV(i).p();
}
// adj_psi_V[gibbs.state()[i]] -= bp_dual.beliefV(i)[gibbs.state()[i]]/10;
pair<size_t,Real> argmax_state = adj_psi_V.argmax();
- if(i==0 || argmax_state.second>max_var) {
+ if( i == 0 || argmax_state.second > max_var ) {
argmax_var = i;
max_var = argmax_state.second;
argmax_var_state = argmax_state.first;
}
}
assert(/*0 <= argmax_var_state &&*/
- argmax_var_state < in_bp.fg().var(argmax_var).states());
+ argmax_var_state < in_bp.fg().var(argmax_var).states() );
} else {
- for(size_t I=0; I<in_bp.fg().nrFactors(); I++) {
+ for( size_t I = 0; I < in_bp.fg().nrFactors(); I++ ) {
Prob adj_psi_F = bbp.adj_psi_F(I);
if(0) {
// helps to account for amount of movement possible in variable
// adj_psi_V[gibbs.state()[i]] -= bp_dual.beliefV(i)[gibbs.state()[i]]/10;
pair<size_t,Real> argmax_state = adj_psi_F.argmax();
- if(I==0 || argmax_state.second>max_var) {
+ if( I == 0 || argmax_state.second > max_var ) {
argmax_var = I;
max_var = argmax_state.second;
argmax_var_state = argmax_state.first;
}
}
assert(/*0 <= argmax_var_state &&*/
- argmax_var_state < in_bp.fg().factor(argmax_var).states());
+ argmax_var_state < in_bp.fg().factor(argmax_var).states() );
}
- if(maxVarOut) *maxVarOut = max_var;
- return make_pair(argmax_var,argmax_var_state);
+ if( maxVarOut )
+ *maxVarOut = max_var;
+ return make_pair( argmax_var, argmax_var_state );
}
-vector<size_t> complement( vector<size_t>& xis, size_t n_states ) {
- vector<size_t> cmp_xis(0);
+vector<size_t> complement( vector<size_t> &xis, size_t n_states ) {
+ vector<size_t> cmp_xis( 0 );
size_t j = 0;
for( size_t xi = 0; xi < n_states; xi++ ) {
while( j < xis.size() && xis[j] < xi )
Real logSumExp( Real a, Real b ) {
if( a > b )
- return a + log1p(exp(b-a));
+ return a + log1p( exp( b-a ) );
else
- return b + log1p(exp(a-b));
+ return b + log1p( exp( a-b ) );
}
-Real dist( const vector<Factor>& b1, const vector<Factor>& b2, size_t nv ) {
+Real dist( const vector<Factor> &b1, const vector<Factor> &b2, size_t nv ) {
Real d = 0.0;
for( size_t k = 0; k < nv; k++ )
d += dist( b1[k], b2[k], Prob::DISTLINF );