CCFLAGS:=$(CCFLAGS) -DDAI_WITH_GIBBS
OBJECTS:=$(OBJECTS) gibbs$(OE)
endif
+ifdef WITH_BP_DUAL
+ CCFLAGS:=$(CCFLAGS) -DDAI_WITH_BP_DUAL
+ OBJECTS:=$(OBJECTS) bp_dual$(OE)
+endif
+ifdef WITH_CBP
+ CCFLAGS:=$(CCFLAGS) -DDAI_WITH_CBP
+ OBJECTS:=$(OBJECTS) bbp$(OE) cbp$(OE)
+endif
+
# Define standard libDAI header dependencies
-HEADERS=$(INC)/bipgraph.h $(INC)/index.h $(INC)/var.h $(INC)/factor.h $(INC)/varset.h $(INC)/smallset.h $(INC)/prob.h $(INC)/daialg.h $(INC)/properties.h $(INC)/alldai.h $(INC)/enum.h $(INC)/exceptions.h
+HEADERS=$(INC)/bipgraph.h $(INC)/index.h $(INC)/var.h $(INC)/factor.h $(INC)/varset.h $(INC)/smallset.h $(INC)/prob.h $(INC)/daialg.h $(INC)/properties.h $(INC)/alldai.h $(INC)/enum.h $(INC)/exceptions.h $(INC)/util.h
# Setup final command for C++ compiler and MEX
ifdef DEBUG
bp$(OE) : $(SRC)/bp.cpp $(INC)/bp.h $(HEADERS)
$(CC) -c $(SRC)/bp.cpp
+bp_dual$(OE) : $(SRC)/bp_dual.cpp $(INC)/bp_dual.h $(HEADERS)
+ $(CC) -c $(SRC)/bp_dual.cpp
+
+bbp$(OE) : $(SRC)/bbp.cpp $(INC)/bbp.h $(INC)/bp_dual.h $(HEADERS)
+ $(CC) -c $(SRC)/bbp.cpp
+
+cbp$(OE) : $(SRC)/cbp.cpp $(INC)/cbp.h $(INC)/bbp.h $(INC)/bp_dual.h $(HEADERS)
+ $(CC) -c $(SRC)/cbp.cpp
+
lc$(OE) : $(SRC)/lc.cpp $(INC)/lc.h $(HEADERS)
$(CC) -c $(SRC)/lc.cpp
WITH_JTREE=true
WITH_MR=true
WITH_GIBBS=true
+WITH_BP_DUAL=true
+WITH_CBP=true
# Build doxygen documentation?
WITH_DOC=true
# Build with debug info?
WITH_JTREE=true
WITH_MR=true
WITH_GIBBS=true
+WITH_BP_DUAL=true
+WITH_CBP=true
# Build doxygen documentation?
WITH_DOC=true
# Build with debug info?
WITH_JTREE=true
WITH_MR=true
WITH_GIBBS=true
+WITH_BP_DUAL=true
+WITH_CBP=true
# Build doxygen documentation?
WITH_DOC=true
# Build with debug info?
WITH_JTREE=true
WITH_MR=true
WITH_GIBBS=true
+WITH_BP_DUAL=true
+WITH_CBP=true
# Build doxygen documentation?
WITH_DOC=
# Build with debug info?
#ifdef DAI_WITH_GIBBS
#include <dai/gibbs.h>
#endif
+#ifdef DAI_WITH_BP_DUAL
+ #include <dai/bp_dual.h>
+#endif
+#ifdef DAI_WITH_CBP
+ #include <dai/cbp.h>
+#endif
/// Namespace for libDAI
#endif
#ifdef DAI_WITH_GIBBS
Gibbs::Name,
+#endif
+#ifdef DAI_WITH_CBP
+ CBP::Name,
#endif
""
};
--- /dev/null
+#ifndef ___defined_libdai_bbp_h
+#define ___defined_libdai_bbp_h
+
+#include <vector>
+#include <utility>
+#include <ext/hash_map>
+
+#include <boost/tuple/tuple.hpp>
+
+#include <dai/prob.h>
+#include <dai/daialg.h>
+#include <dai/factorgraph.h>
+#include <dai/enum.h>
+
+#include <dai/bp_dual.h>
+
+namespace dai {
+
+using namespace std;
+using namespace __gnu_cxx;
+using boost::tuple;
+using boost::get;
+
+// utility function to subtract 1 from a size_t, or return 0 if the
+// argument is 0
+inline size_t oneLess(size_t v) { return v==0?v:v-1; }
+
+/// function to compute adj_w_unnorm from w, Z_w, adj_w
+Prob unnormAdjoint(const Prob &w, Real Z_w, const Prob &adj_w);
+
+vector<size_t> getGibbsState(const BP_dual& fg, size_t iters);
+
+vector<Prob> get_zero_adj_2(const BP_dual& bp_dual);
+vector<Prob> get_zero_adj_1(const BP_dual& bp_dual);
+
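+/// BBP propagates adjoints backwards through the BP fixed point computed by
+/// a BP_dual object: given the adjoints of a cost function with respect to
+/// the beliefs (adj_b_1, adj_b_2) and, optionally, initial adjoints for the
+/// factors, it computes the adjoints with respect to the factors
+/// (adj_psi_1, adj_psi_2) and the messages (adj_n, adj_m).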
+class BBP {
+ // ----------------------------------------------------------------
+ // inputs
+ const BP_dual* _bp_dual;
+ const FactorGraph* _fg;
+ vector<Prob> _adj_b_1, _adj_b_2;
+ vector<Prob> _adj_b_1_unnorm, _adj_b_2_unnorm;
+ // in case the objective function depends on factors as well:
+ vector<Prob> _init_adj_psi_1;
+ vector<Prob> _init_adj_psi_2;
+ // ----------------------------------------------------------------
+  // outputs
+ vector<Prob> _adj_psi_1, _adj_psi_2;
+ // the following vectors are length _fg->nr_edges()
+ vector<Prob> _adj_n, _adj_m;
+ vector<Prob> _adj_n_unnorm, _adj_m_unnorm;
+ vector<Prob> _new_adj_n, _new_adj_m;
+ // ----------------------------------------------------------------
+ // auxiliary data
+ typedef vector<hash_map<size_t, vector<size_t> > > _trip_t;
+ typedef vector<size_t> _trip_end_t;
+
+ // members to store indices of VFV and FVF triples
+ // XXX need to have vector going in other direction so that we can
+ // do faster looping on iIj and IiJ
+ _trip_t _VFV;
+ _trip_t _FVF;
+
+ typedef vector<tuple<size_t,size_t,size_t> > _trip_ind_t;
+ _trip_ind_t _VFV_ind;
+ _trip_ind_t _FVF_ind;
+
+ // helper quantities computed from the BP messages
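+  //   T(i,I) = prod_{J in nb(i)\I} m_{J->i}
+  //   U(I,i) = prod_{j in nb(I)\i} n_{j->I}   (defined on the states of factor I)
+  //   S(i,I,j) = (factor I) * prod_{k in nb(I)\{i,j}} n_{k->I}, marginalized onto (i,j)
+  //   R(I,i,J) = prod_{K in nb(i)\{I,J}} m_{K->i}
+  // (see the RegenerateT/U/S/R methods)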
+ vector<Prob> _T,_U,_S,_R;
+ size_t _iters;
+ size_t _max_iter;
+
+ public:
+ void RegenerateInds();
+ void RegenerateT();
+ void RegenerateU();
+ void RegenerateS();
+ void RegenerateR();
+ void RegenerateInputs();
+ /// initialise members for messages and factor adjoints
+ void RegeneratePsiAdjoints();
+ void RegenerateMessageAdjoints();
+ void Regenerate();
+
+ size_t VFV(size_t i, size_t I, size_t j) const { return (*const_cast<_trip_t*>(&_VFV))[i][I][j]; }
+ size_t FVF(size_t I, size_t i, size_t J) const { return (*const_cast<_trip_t*>(&_FVF))[I][i][J]; }
+ DAI_ACCMUT(Prob & T(size_t i, size_t I), { return _T[_fg->VV2E(i,I)]; });
+ DAI_ACCMUT(Prob & U(size_t I, size_t i), { return _U[_fg->VV2E(i,I)]; });
+ DAI_ACCMUT(Prob & S(size_t i, size_t I, size_t j), { return _S[VFV(i,I,j)]; });
+ DAI_ACCMUT(Prob & R(size_t I, size_t i, size_t J), { return _R[FVF(I,i,J)]; });
+
+ DAI_ACCMUT(Prob & T(size_t iI), { return _T[iI]; });
+ DAI_ACCMUT(Prob & U(size_t iI), { return _U[iI]; });
+ DAI_ACCMUT(Prob & S(size_t iIj), { return _S[iIj]; });
+ DAI_ACCMUT(Prob & R(size_t IiJ), { return _R[IiJ]; });
+
+ void calcNewN(size_t iI);
+ void calcNewM(size_t iI);
+ void upMsgM(size_t iI);
+ void upMsgN(size_t iI);
+ void doParUpdate();
+ Real getUnMsgMag();
+ void getMsgMags(Real &s, Real &new_s);
+
+ void zero_adj_b_2() {
+ _adj_b_2.clear();
+ _adj_b_2.reserve(_bp_dual->nrFactors());
+ for(size_t I=0; I<_bp_dual->nrFactors(); I++) {
+ _adj_b_2.push_back(Prob(_bp_dual->factor(I).states(),Real(0.0)));
+ }
+ }
+ public:
+ BBP(const BP_dual &bp_dual) :
+ _bp_dual(&bp_dual), _fg(&bp_dual), _max_iter(50) {}
+
+ DAI_ACCMUT(size_t& maxIter(), { return _max_iter; });
+
+ void init(const vector<Prob> &adj_b_1, const vector<Prob> &adj_b_2,
+ const vector<Prob> &adj_psi_1, const vector<Prob> &adj_psi_2) {
+ _adj_b_1 = adj_b_1;
+ _adj_b_2 = adj_b_2;
+ _init_adj_psi_1 = adj_psi_1;
+ _init_adj_psi_2 = adj_psi_2;
+ Regenerate();
+ }
+ void init(const vector<Prob> &adj_b_1, const vector<Prob> &adj_b_2) {
+ init(adj_b_1, adj_b_2, get_zero_adj_1(*_bp_dual), get_zero_adj_2(*_bp_dual));
+ }
+ void init(const vector<Prob> &adj_b_1) {
+ init(adj_b_1, get_zero_adj_2(*_bp_dual));
+ }
+
+ /// run until change is less than given tolerance
+ void run(Real tol);
+
+ size_t doneIters() { return _iters; }
+
+ DAI_ACCMUT(Prob& adj_psi_1(size_t i), { return _adj_psi_1[i]; });
+ DAI_ACCMUT(Prob& adj_psi_2(size_t I), { return _adj_psi_2[I]; });
+ DAI_ACCMUT(Prob& adj_b_1(size_t i), { return _adj_b_1[i]; });
+ DAI_ACCMUT(Prob& adj_b_2(size_t I), { return _adj_b_2[I]; });
+ DAI_ACCMUT(Prob& adj_n(size_t i, size_t I), { return _adj_n[_fg->VV2E(i,I)]; });
+ DAI_ACCMUT(Prob& adj_m(size_t I, size_t i), { return _adj_m[_fg->VV2E(i,I)]; });
+};
+
+/// Cost functions. Not used by BBP class, only used by following functions
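+/// cfn_b, cfn_b2 and cfn_exp are evaluated at a Gibbs state, summing
+/// b_i[x_i], b_i[x_i]^2/2 resp. exp(b_i[x_i]) over the variables;
+/// cfn_var_ent and cfn_factor_ent are (minus) entropies of variable resp.
+/// factor beliefs; cfn_bethe_ent is -logZ of the Bethe approximation
+/// (see gibbsCostFn and gibbsInitBBPCostFnAdj)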
+DAI_ENUM(bbp_cfn_t, cfn_b, cfn_b2, cfn_exp, cfn_var_ent, cfn_factor_ent, cfn_bethe_ent);
+
+/// initialise BBP using BP_dual and (cfn_type, stateP)
+void gibbsInitBBPCostFnAdj(BBP& bbp, const BP_dual& fg, bbp_cfn_t cfn_type, const vector<size_t>* stateP);
+
+/// calculate actual value of cost function (cfn_type, stateP)
+Real gibbsCostFn(const BP_dual& fg, bbp_cfn_t cfn_type, const vector<size_t> *stateP);
+
+/// function to test the validity of adjoints computed by BBP
+double numericBBPTest(const BP_dual& bp_dual, bbp_cfn_t cfn, double bbpTol, double h);
+
+/// output to "./bbp-data/" a set of files which evaluate the effect
+/// on the given cost function (for a random Gibbs state) of
+/// perturbing each graph variable
+void makeBBPGraph(const BP_dual& bp_dual, bbp_cfn_t cfn);
+
+}
+
+#endif
std::vector<size_t> ind2; // indices of nodes of type 2
};
+ /// Support for some backwards compatibility with old interface
+ /** Call indexEdges() first to initialize these members
+ */
+ bool _edge_indexed;
+ std::vector<Edge> _edges;
+ hash_map<Edge,size_t> _vv2e;
+
public:
/// Default constructor (creates an empty bipartite graph)
- BipartiteGraph() : _nb1(), _nb2() {}
+ BipartiteGraph() : _nb1(), _nb2(), _edge_indexed(false) {}
/// Constructs BipartiteGraph from a range of edges.
/** \tparam EdgeInputIterator Iterator that iterates over instances of BipartiteGraph::Edge.
* \param end Points just beyond the last edge.
*/
template<typename EdgeInputIterator>
- BipartiteGraph( size_t nr1, size_t nr2, EdgeInputIterator begin, EdgeInputIterator end ) : _nb1( nr1 ), _nb2( nr2 ) {
+ BipartiteGraph( size_t nr1, size_t nr2, EdgeInputIterator begin, EdgeInputIterator end ) : _nb1( nr1 ), _nb2( nr2 ), _edge_indexed(false) {
construct( nr1, nr2, begin, end );
}
/// Writes this BipartiteGraph to an output stream in GraphViz .dot syntax
void printDot( std::ostream& os ) const;
+ // ----------------------------------------------------------------
+ // backwards compatibility layer
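+    // indexEdges() builds a list of all edges (_edges), ordered by
+    // (node1,node2), together with the reverse map _vv2e used by VV2E();
+    // BP_dual and BBP use these to address messages by a single edge index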
+ void indexEdges() {
+ _edges.clear();
+ _vv2e.clear();
+ size_t i=0;
+ foreach(const Neighbors &nb1s, _nb1) {
+ foreach(const Neighbor &n2, nb1s) {
+ Edge e(i, n2.node);
+ _edges.push_back(e);
+ }
+ i++;
+ }
+ sort(_edges.begin(), _edges.end()); // unnecessary?
+
+ i=0;
+ foreach(const Edge& e, _edges) {
+ _vv2e[e] = i++;
+ }
+
+ _edge_indexed = true;
+ }
+
+ const Edge& edge(size_t e) const {
+ assert(_edge_indexed);
+ return _edges[e];
+ }
+
+ const std::vector<Edge>& edges() const {
+ return _edges;
+ }
+
+ size_t VV2E(size_t n1, size_t n2) const {
+ assert(_edge_indexed);
+ Edge e(n1,n2);
+ hash_map<Edge,size_t>::const_iterator i = _vv2e.find(e);
+ assert(i != _vv2e.end());
+ return i->second;
+ }
+
+ size_t nr_edges() const {
+ assert(_edge_indexed);
+ return _edges.size();
+ }
+
private:
/// Checks internal consistency
void check() const;
--- /dev/null
+#ifndef ____defined_libdai_bp_dual_h__
+#define ____defined_libdai_bp_dual_h__
+
+#include <dai/daialg.h>
+#include <dai/factorgraph.h>
+#include <dai/enum.h>
+#include <dai/bp.h>
+
+namespace dai {
+
+using namespace std;
+
+struct BP_dual_messages {
+ // messages:
+ // indexed by edge index (using VV2E)
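+  // n, Zn: variable->factor messages n_{i->I} and their normalizers
+  // m, Zm: factor->variable messages m_{I->i} and their normalizers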
+ vector<Prob> n;
+ vector<Real> Zn;
+ vector<Prob> m;
+ vector<Real> Zm;
+};
+
+struct BP_dual_beliefs {
+ // beliefs:
+ // indexed by node
+ vector<Prob> b1;
+ vector<Real> Zb1;
+ // indexed by factor
+ vector<Prob> b2;
+ vector<Real> Zb2;
+};
+
+void _clamp(FactorGraph &g, const Var & n, const vector<size_t> &is );
+
+// clamp a factor to have one of a set of values
+void _clampFactor(FactorGraph &g, size_t I, const vector<size_t> &is);
+
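+/// BP_dual is a variant of BP that, besides the (normalized) messages and
+/// beliefs, also keeps track of their normalization constants
+/// (Zn, Zm, Zb1, Zb2), which are needed by BBP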
+class BP_dual : public DAIAlgFG {
+ public:
+ typedef vector<size_t> _ind_t;
+
+ protected:
+
+  // indexed by edge index: for each edge i->I, contains a vector with one
+  // entry per joint state of factor I; the value of entry x_I is the
+  // corresponding state x_i of variable i
+ vector<_ind_t> _indices;
+
+ BP_dual_messages _msgs;
+ BP_dual_messages _new_msgs;
+ public:
+ BP_dual_beliefs _beliefs;
+
+ size_t _iters;
+ double _maxdiff;
+
+ struct Properties {
+ typedef BP::Properties::UpdateType UpdateType;
+ UpdateType updates;
+ double tol;
+ size_t maxiter;
+ size_t verbose;
+ } props;
+
+ /// List of property names
+ static const char *PropertyList[];
+ /// Name of this inference algorithm
+ static const char *Name;
+
+ public:
+ void Regenerate(); // used by constructor
+ void RegenerateIndices();
+ void RegenerateMessages();
+ void RegenerateBeliefs();
+
+ void CalcBelief1(size_t i);
+ void CalcBelief2(size_t I);
+ void CalcBeliefs(); // called after run()
+
+ void calcNewM(size_t iI);
+ void calcNewN(size_t iI);
+ void upMsgM(size_t iI);
+ void upMsgN(size_t iI);
+
+/* DAI_ENUM(UpdateType,SEQFIX,SEQRND,SEQMAX,PARALL) */
+ typedef BP::Properties::UpdateType UpdateType;
+ UpdateType Updates() const { return props.updates; }
+ size_t Verbose() const { return props.verbose; }
+
+ /// construct BP_dual object from FactorGraph
+ BP_dual(const FactorGraph & fg, const PropertySet &opts) : DAIAlgFG(fg) {
+ setProperties(opts);
+ Regenerate();
+ }
+
+ DAI_ACCMUT(Prob & msgM(size_t I, size_t i), { return _msgs.m[VV2E(i,I)]; });
+ DAI_ACCMUT(Prob & msgN(size_t i, size_t I), { return _msgs.n[VV2E(i,I)]; });
+ DAI_ACCMUT(Prob & msgM(size_t iI), { return _msgs.m[iI]; });
+ DAI_ACCMUT(Prob & msgN(size_t iI), { return _msgs.n[iI]; });
+ DAI_ACCMUT(Real & zM(size_t I, size_t i), { return _msgs.Zm[VV2E(i,I)]; });
+ DAI_ACCMUT(Real & zN(size_t i, size_t I), { return _msgs.Zn[VV2E(i,I)]; });
+ DAI_ACCMUT(Real & zM(size_t iI), { return _msgs.Zm[iI]; });
+ DAI_ACCMUT(Real & zN(size_t iI), { return _msgs.Zn[iI]; });
+ DAI_ACCMUT(Prob & newMsgM(size_t I, size_t i), { return _new_msgs.m[VV2E(i,I)]; });
+ DAI_ACCMUT(Prob & newMsgN(size_t i, size_t I), { return _new_msgs.n[VV2E(i,I)]; });
+ DAI_ACCMUT(Real & newZM(size_t I, size_t i), { return _new_msgs.Zm[VV2E(i,I)]; });
+ DAI_ACCMUT(Real & newZN(size_t i, size_t I), { return _new_msgs.Zn[VV2E(i,I)]; });
+
+ DAI_ACCMUT(_ind_t & index(size_t i, size_t I), { return( _indices[VV2E(i,I)] ); });
+
+ Real belief1Z(size_t i) const { return _beliefs.Zb1[i]; }
+ Real belief2Z(size_t I) const { return _beliefs.Zb2[I]; }
+
+ size_t doneIters() const { return _iters; }
+
+
+ /// @name General InfAlg interface
+ //@{
+ virtual BP_dual* clone() const { return new BP_dual(*this); }
+/* virtual BP_dual* create() const { return new BP_dual(); } */
+ virtual BP_dual* create() const { return NULL; }
+ virtual std::string identify() const;
+ virtual Factor belief (const Var &n) const { return( belief1( findVar( n ) ) ); }
+ virtual Factor belief (const VarSet &n) const;
+ virtual std::vector<Factor> beliefs() const;
+ virtual Real logZ() const;
+ virtual void init();
+ virtual void init( const VarSet &ns );
+ virtual double run();
+ virtual double maxDiff() const { return _maxdiff; }
+ virtual size_t Iterations() const { return _iters; }
+ //@}
+
+ void init(const vector<size_t>& state);
+ Factor belief1 (size_t i) const { return Factor(var(i), _beliefs.b1[i]); }
+ Factor belief2 (size_t I) const { return Factor(factor(I).vars(), _beliefs.b2[I]); }
+
+ void updateMaxDiff( double maxdiff ) { if( maxdiff > _maxdiff ) _maxdiff = maxdiff; }
+
+ /// Set Props according to the PropertySet opts, where the values can be stored as std::strings or as the type of the corresponding Props member
+ void setProperties( const PropertySet &opts );
+ PropertySet getProperties() const;
+ std::string printProperties() const;
+};
+
+}
+
+#endif
--- /dev/null
+#ifndef ____defined_libdai_cbp_h__
+#define ____defined_libdai_cbp_h__
+
+#include <dai/daialg.h>
+
+#include <dai/bp_dual.h>
+#include <dai/bbp.h>
+
+using namespace std;
+
+/* Class for "clamped belief propagation".
+
+'chooseNextClampVar' can be overridden (e.g. in BPC); this version
+randomly chooses variables to clamp.
+*/
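+/*
+CBP clamps a variable (or factor) to a subset of its states and to the
+complementary subset, runs BP_dual on each resulting instance (recursing up
+to 'max_levels' times), and combines the logZ and belief estimates obtained
+at the leaves of the recursion into its final estimates.
+*/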
+
+namespace dai {
+
+template<class T>
+vector<T> concat(const vector<T>& u, const vector<T>& v) {
+ vector<T> w;
+ w.reserve(u.size()+v.size());
+ for(size_t i=0; i<u.size(); i++)
+ w.push_back(u[i]);
+ for(size_t i=0; i<v.size(); i++)
+ w.push_back(v[i]);
+ return w;
+}
+
+class CBP : public DAIAlgFG {
+ vector<Factor> _beliefs1;
+ vector<Factor> _beliefs2;
+ double _logZ;
+ double _est_logZ;
+ double _old_est_logZ;
+
+ double _sum_level;
+ size_t _num_leaves;
+
+ size_t maxClampLevel() { return props.max_levels; }
+ double minMaxAdj() { return props.min_max_adj; }
+ double recTol() { return props.rec_tol; }
+
+
+ bbp_cfn_t BBP_cost_function() {
+ return props.bbp_cfn;
+ }
+
+ void printDebugInfo();
+
+ void runRecurse(BP_dual &bp_dual,
+ double orig_logZ,
+ vector<size_t> clamped_vars_list,
+ set<size_t> clamped_vars,
+ size_t &num_leaves,
+ double &sum_level,
+ Real &lz_out, vector<Factor>& beliefs_out);
+
+ BP getBP();
+ BP_dual getBP_dual();
+
+ public:
+ size_t _iters;
+ double _maxdiff;
+
+ struct Properties {
+ typedef BP::Properties::UpdateType UpdateType;
+ DAI_ENUM(RecurseType,REC_FIXED,REC_LOGZ,REC_BDIFF);
+ DAI_ENUM(ChooseMethodType,CHOOSE_RANDOM,CHOOSE_BBP,CHOOSE_BP_ALL);
+ DAI_ENUM(ClampType,CLAMP_VAR,CLAMP_FACTOR);
+
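+    // tol, maxiter, verbose: passed on to the BP_dual runs (see getBP_dual())
+    // rec_tol: tolerance used to stop recursing when recursion==REC_LOGZ
+    // max_levels: maximum number of clamping levels
+    // min_max_adj: threshold on the maximum adjoint when choosing what to clamp (see minMaxAdj())
+    // choose: how the next variable/factor to clamp is chosen
+    // recursion: criterion for deciding whether to recurse further
+    // clamp: clamp variables (CLAMP_VAR) or factors (CLAMP_FACTOR)
+    // bbp_cfn: BBP cost function used when choosing clamp variables via BBP
+    // rand_seed: random seed used by run() (0 = do not reseed)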
+ double tol;
+ double rec_tol;
+ size_t maxiter;
+ size_t verbose;
+ UpdateType updates;
+ size_t max_levels;
+ double min_max_adj;
+
+ ChooseMethodType choose;
+ RecurseType recursion;
+ ClampType clamp;
+ bbp_cfn_t bbp_cfn;
+ size_t rand_seed;
+ } props;
+
+ Properties::ChooseMethodType ChooseMethod() { return props.choose; }
+ Properties::RecurseType Recursion() { return props.recursion; }
+ Properties::ClampType Clamping() { return props.clamp; }
+
+ //----------------------------------------------------------------
+
+ // construct CBP object from FactorGraph
+ CBP(const FactorGraph &fg, const PropertySet &opts) : DAIAlgFG(fg) {
+ setProperties(opts);
+ construct();
+ }
+
+ static const char *Name;
+ /// List of property names
+ static const char *PropertyList[];
+
+ /// @name General InfAlg interface
+ //@{
+ virtual CBP* clone() const { return new CBP(*this); }
+// virtual CBP* create() const { return new CBP(); }
+ virtual CBP* create() const { throw "Unimplemented"; }
+ virtual std::string identify() const { return string(Name) + printProperties(); }
+ virtual Factor belief (const Var &n) const { return _beliefs1[findVar(n)]; }
+    virtual Factor belief (const VarSet &) const { assert(0); return Factor(); }
+ virtual vector<Factor> beliefs() const { return concat(_beliefs1, _beliefs2); }
+ virtual Real logZ() const { return _logZ; }
+ virtual void init() {};
+ virtual void init( const VarSet & ) {};
+ virtual double run();
+ virtual double maxDiff() const { return _maxdiff; }
+ virtual size_t Iterations() const { return _iters; }
+ //@}
+
+
+ //----------------------------------------------------------------
+
+ virtual bool chooseNextClampVar(BP_dual& bp,
+ vector<size_t> &clamped_vars_list,
+ set<size_t> &clamped_vars,
+ size_t &i, vector<size_t> &xis, Real *maxVarOut);
+
+ //----------------------------------------------------------------
+
+ void setBeliefsFrom(const BP& b);
+ void setBeliefs(const vector<Factor>& bs, double logZ) {
+ size_t i=0;
+ _beliefs1.clear(); _beliefs1.reserve(nrVars());
+ _beliefs2.clear(); _beliefs2.reserve(nrFactors());
+ for(i=0; i<nrVars(); i++) _beliefs1.push_back(bs[i]);
+ for(; i<nrVars()+nrFactors(); i++) _beliefs2.push_back(bs[i]);
+ _logZ = logZ;
+ }
+ Factor belief1 (size_t i) const { return _beliefs1[i]; }
+ Factor belief2 (size_t I) const { return _beliefs2[I]; }
+
+ //----------------------------------------------------------------
+
+ void construct();
+
+ void setProperties( const PropertySet &opts );
+ PropertySet getProperties() const;
+ std::string printProperties() const;
+};
+
+pair<size_t, size_t> bbpFindClampVar(BP_dual &in_bp_dual, bool clampVar, size_t numClamped, bbp_cfn_t cfn, const PropertySet &props, Real *maxVarOut);
+
+}
+
+#endif
friend std::ostream& operator << (std::ostream& os, const FactorGraph& fg);
friend std::istream& operator >> (std::istream& is, FactorGraph& fg);
+ size_t VV2E(size_t n1, size_t n2) const { return G.VV2E(n1,n2); }
+ const Edge& edge(size_t e) const { return G.edge(e); }
+ void indexEdges() { G.indexEdges(); }
+ size_t nr_edges() const { return G.nr_edges(); }
+ const std::vector<Edge>& edges() const { return G.edges(); }
+
private:
/// Part of constructors (creates edges, neighbors and adjacency matrix)
void constructGraph( size_t nrEdges );
#include <iostream>
#include <cstdio>
#include <boost/foreach.hpp>
+#include <boost/functional/hash.hpp>
#include <algorithm>
/// An alias to the BOOST_FOREACH macro from the boost::foreach library
#define foreach BOOST_FOREACH
+#ifdef DAI_DEBUG
+#define DAI_PV(x) do {std::cerr << #x "= " << x << std::endl;} while(0)
+#define DAI_DMSG(str) do {std::cerr << str << std::endl;} while(0)
+#else
+#define DAI_PV(x) do {} while(0)
+#define DAI_DMSG(str) do {} while(0)
+#endif
+
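+// DAI_ACCMUT(x,y) defines a mutable accessor together with its const
+// counterpart; e.g. DAI_ACCMUT(Prob & T(size_t iI), { return _T[iI]; })
+// expands to
+//   Prob & T(size_t iI) { return _T[iI]; };
+//   const Prob & T(size_t iI) const { return _T[iI]; };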
+#define DAI_ACCMUT(x,y) \
+ x y; \
+ const x const y;
+
+#define DAI_IFVERB(n, stmt) if(props.verbose>=n) { cerr << stmt; }
+
/// Real number (alias for double, which could be changed to long double if necessary)
typedef double Real;
/// hash_map is an alias for std::map.
/** Since there is no TR1 unordered_map implementation available yet, we fall back on std::map.
*/
- template <typename T, typename U>
+ template <typename T, typename U, typename H = boost::hash<T> >
class hash_map : public std::map<T,U> {};
#else
/// hash_map is an alias for std::tr1::unordered_map.
/** We use the (experimental) TR1 unordered_map implementation included in modern GCC distributions.
*/
- template <typename T, typename U>
- class hash_map : public std::tr1::unordered_map<T,U> {};
+ template <typename T, typename U, typename H = boost::hash<T> >
+ class hash_map : public std::tr1::unordered_map<T,U,H> {};
#endif
#ifdef DAI_WITH_GIBBS
if( name == Gibbs::Name )
return new Gibbs (fg, opts);
+#endif
+#ifdef DAI_WITH_BP_DUAL
+ if( name == BP_dual::Name )
+ return new BP_dual (fg, opts);
+#endif
+#ifdef DAI_WITH_CBP
+ if( name == CBP::Name )
+ return new CBP (fg, opts);
#endif
DAI_THROW(UNKNOWN_DAI_ALGORITHM);
}
--- /dev/null
+#include <dai/bp.h>
+#include <dai/bbp.h>
+#include <dai/gibbs.h>
+
+// for makeBBPGraph: {
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <iostream>
+#include <fstream>
+// }
+
+namespace dai {
+
+using namespace std;
+
+#define rnd_multi(x) rnd_int(0,(x)-1)
+
+/// function to compute adj_w_unnorm (the adjoint of the unnormalized distribution) from w, Z_w and adj_w
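+/// if w = w_unnorm / Z_w, the chain rule gives
+/// adj_w_unnorm[i] = (adj_w[i] - sum_j w[j]*adj_w[j]) / Z_w,
+/// which is what is computed below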
+Prob unnormAdjoint(const Prob &w, Real Z_w, const Prob &adj_w) {
+ assert(w.size()==adj_w.size());
+ Prob adj_w_unnorm(w.size(),0.0);
+ Real s=0.0;
+ for(size_t i=0; i<w.size(); i++) {
+ s += w[i]*adj_w[i];
+ }
+ for(size_t i=0; i<w.size(); i++) {
+ adj_w_unnorm[i] = (adj_w[i]-s)/Z_w;
+ }
+ return adj_w_unnorm;
+}
+
+/// Function to turn Gibbs state into b1_adj
+/// calls bbp.init with calculated adjoints
+void gibbsInitBBPCostFnAdj(BBP& bbp, const BP_dual &fg, bbp_cfn_t cfn_type, const vector<size_t>* stateP) {
+  if(cfn_type==bbp_cfn_t::cfn_bethe_ent) {
+ vector<Prob> b1_adj;
+ vector<Prob> b2_adj;
+ vector<Prob> psi1_adj;
+ vector<Prob> psi2_adj;
+ b1_adj.reserve(fg.nrVars());
+ psi1_adj.reserve(fg.nrVars());
+ b2_adj.reserve(fg.nrFactors());
+ psi2_adj.reserve(fg.nrFactors());
+ for(size_t i=0; i<fg.nrVars(); i++) {
+ size_t dim = fg.var(i).states();
+ int c = fg.nbV(i).size();
+ Prob p(dim,0.0);
+ for(size_t xi=0; xi<dim; xi++) {
+ p[xi] = (1-c)*(1+log(fg.belief1(i)[xi]));
+ }
+ b1_adj.push_back(p);
+
+ for(size_t xi=0; xi<dim; xi++) {
+ p[xi] = -fg.belief1(i)[xi];
+ }
+ psi1_adj.push_back(p);
+ }
+ for(size_t I=0; I<fg.nrFactors(); I++) {
+ size_t dim = fg.factor(I).states();
+ Prob p(dim,0.0);
+ for(size_t xI=0; xI<dim; xI++) {
+ p[xI] = 1 + log(fg.belief2(I)[xI]/fg.factor(I).p()[xI]);
+ }
+ b2_adj.push_back(p);
+
+ for(size_t xI=0; xI<dim; xI++) {
+ p[xI] = -fg.belief2(I)[xI]/fg.factor(I).p()[xI];
+ }
+ psi2_adj.push_back(p);
+ }
+ bbp.init(b1_adj, b2_adj, psi1_adj, psi2_adj);
+ } else if(cfn_type==bbp_cfn_t::cfn_factor_ent) {
+ vector<Prob> b2_adj;
+ b2_adj.reserve(fg.nrFactors());
+ for(size_t I=0; I<fg.nrFactors(); I++) {
+ size_t dim = fg.factor(I).states();
+ Prob p(dim,0.0);
+ for(size_t xI=0; xI<dim; xI++) {
+ double bIxI = fg.belief2(I)[xI];
+ if(bIxI<1.0e-15) {
+ p[xI] = -1.0e10;
+ } else {
+ p[xI] = 1+log(bIxI);
+ }
+ }
+ b2_adj.push_back(p);
+ }
+ bbp.init(get_zero_adj_1(fg), b2_adj);
+ } else if(cfn_type==bbp_cfn_t::cfn_var_ent) {
+ vector<Prob> b1_adj;
+ b1_adj.reserve(fg.nrVars());
+ for(size_t i=0; i<fg.nrVars(); i++) {
+ size_t dim = fg.var(i).states();
+ Prob p(dim,0.0);
+ for(size_t xi=0; xi<fg.var(i).states(); xi++) {
+ double bixi = fg.belief1(i)[xi];
+ if(bixi<1.0e-15) {
+ p[xi] = -1.0e10;
+ } else {
+ p[xi] = 1+log(bixi);
+ }
+ }
+ b1_adj.push_back(p);
+ }
+ bbp.init(b1_adj);
+ } else { // cost functions that use Gibbs sample: cfn_b, cfn_b2, cfn_exp
+ vector<size_t> state;
+ if(stateP==NULL) {
+ state = getGibbsState(fg,2*fg.doneIters());
+ } else {
+ state = *stateP;
+ }
+ assert(state.size()==fg.nrVars());
+
+ vector<Prob> b1_adj;
+ b1_adj.reserve(fg.nrVars());
+ for(size_t i=0; i<state.size(); i++) {
+ size_t n = fg.var(i).states();
+ Prob delta(n,0.0);
+ assert(/*0<=state[i] &&*/ state[i] < n);
+ double b = fg.belief1(i)[state[i]];
+ if(cfn_type==bbp_cfn_t::cfn_b) {
+ delta[state[i]] = 1.0;
+ } else if(cfn_type==bbp_cfn_t::cfn_b2) {
+ delta[state[i]] = b;
+ } else if(cfn_type==bbp_cfn_t::cfn_exp) {
+ delta[state[i]] = exp(b);
+ } else { abort(); }
+ b1_adj.push_back(delta);
+ }
+ bbp.init(b1_adj);
+ }
+}
+
+/// This function returns the actual value of the cost function whose
+/// adjoints (gradient with respect to the singleton beliefs) are computed
+/// by gibbsInitBBPCostFnAdj on the same arguments
+Real gibbsCostFn(const BP_dual &fg, bbp_cfn_t cfn_type, const vector<size_t> *stateP) {
+ double cf=0.0;
+ if(cfn_type==bbp_cfn_t::cfn_bethe_ent) { // ignores state
+ cf = -fg.logZ();
+ } else if(cfn_type==bbp_cfn_t::cfn_var_ent) { // ignores state
+ for(size_t i=0; i<fg.nrVars(); i++) {
+ cf += -fg.belief1(i).entropy();
+ }
+ } else {
+ assert(stateP != NULL);
+ vector<size_t> state = *stateP;
+ assert(state.size()==fg.nrVars());
+ for(size_t i=0; i<fg.nrVars(); i++) {
+ double b = fg.belief1(i)[state[i]];
+ if(cfn_type==bbp_cfn_t::cfn_b) {
+ cf += b;
+ } else if(cfn_type==bbp_cfn_t::cfn_b2) {
+ cf += b*b/2;
+ } else if(cfn_type==bbp_cfn_t::cfn_exp) {
+ cf += exp(b);
+ } else { abort(); }
+ }
+ }
+ return cf;
+}
+
+vector<Prob> get_zero_adj_2(const BP_dual& bp_dual) {
+ vector<Prob> adj_2;
+ adj_2.reserve(bp_dual.nrFactors());
+ for(size_t I=0; I<bp_dual.nrFactors(); I++) {
+ adj_2.push_back(Prob(bp_dual.factor(I).states(),Real(0.0)));
+ }
+ return adj_2;
+}
+
+vector<Prob> get_zero_adj_1(const BP_dual& bp_dual) {
+ vector<Prob> adj_1;
+ adj_1.reserve(bp_dual.nrVars());
+ for(size_t i=0; i<bp_dual.nrVars(); i++) {
+ adj_1.push_back(Prob(bp_dual.var(i).states(),Real(0.0)));
+ }
+ return adj_1;
+}
+
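+// iteration helpers: foreach_iI loops over all edges (i,I) of the factor
+// graph; foreach_iIj loops over all triples (i,I,j) where j is a variable of
+// factor I other than i; foreach_IiJ loops over all triples (I,i,J) where J
+// is a factor containing variable i other than I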
+#define foreach_iI(_fg) \
+ for(size_t i=0, I=0, iI=0; (iI<_fg->nr_edges()) && ((i=_fg->edge(iI).first) || 1) && ((I=_fg->edge(iI).second) || 1); iI++)
+
+#define foreach_iIj(_fg) \
+ for(size_t i=0, I=0, j=0, iIj=0; (iIj<_VFV_ind.size()) && \
+ ((i=get<0>(_VFV_ind[iIj])) || 1) && \
+ ((I=get<1>(_VFV_ind[iIj])) || 1) && \
+ ((j=get<2>(_VFV_ind[iIj])) || 1); iIj++)
+
+#define foreach_IiJ(_fg) \
+ for(size_t I=0, i=0, J=0, IiJ=0; (IiJ<_FVF_ind.size()) && \
+ ((I=get<0>(_FVF_ind[IiJ])) || 1) && \
+ ((i=get<1>(_FVF_ind[IiJ])) || 1) && \
+ ((J=get<2>(_FVF_ind[IiJ])) || 1); IiJ++)
+
+
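+// LOOP_ij(body): execute 'body' for every joint state of the variable pair
+// (i,j); xi and xj are the single-variable states and xij the linear index
+// of the joint state, with the lower-labelled variable's state varying fastest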
+#define LOOP_ij(body) { \
+ size_t i_states = _fg->var(i).states(); \
+ size_t j_states = _fg->var(j).states(); \
+ if(_fg->var(i) > _fg->var(j)) { \
+ size_t xij=0; \
+ for(size_t xi=0; xi<i_states; xi++) { \
+ for(size_t xj=0; xj<j_states; xj++) { \
+ body; \
+ xij++; \
+ } \
+ } \
+ } else { \
+ size_t xij=0; \
+ for(size_t xj=0; xj<j_states; xj++) { \
+ for(size_t xi=0; xi<i_states; xi++) { \
+ body; \
+ xij++; \
+ } \
+ } \
+ } \
+ }
+
+void BBP::RegenerateInds() {
+ // initialise _VFV and _VFV_ind
+ {
+ size_t ind=0;
+ _VFV.resize(_fg->nrVars());
+ _VFV_ind.clear();
+ for(size_t i=0; i<_fg->nrVars(); i++) {
+ foreach(size_t I, _fg->nbV(i)) {
+ vector<size_t>& v = _VFV[i][I];
+ v.resize(_fg->nrVars());
+ foreach(size_t j, _fg->nbF(I)) {
+ if(j!=i) {
+ v[j] = ind++;
+ _VFV_ind.push_back(tuple<size_t,size_t,size_t>(i,I,j));
+ }
+ }
+ }
+ }
+ }
+ // initialise _FVF
+ {
+ size_t ind=0;
+ _FVF.resize(_fg->nrFactors());
+ _FVF_ind.clear();
+ for(size_t I=0; I<_fg->nrFactors(); I++) {
+ foreach(size_t i, _fg->nbF(I)) {
+ vector<size_t>& v = _FVF[I][i];
+ v.resize(_fg->nrFactors());
+ foreach(size_t J, _fg->nbV(i)) {
+ if(J!=I) {
+ v[J] = ind++;
+ _FVF_ind.push_back(tuple<size_t,size_t,size_t>(I,i,J));
+ }
+ }
+ }
+ }
+ }
+}
+
+void BBP::RegenerateT() {
+ _T.clear();
+ foreach_iI(_fg) {
+ Prob prod(_fg->var(i).states(),1.0);
+ foreach(size_t J, _fg->nbV(i)) {
+ if(J!=I) {
+ prod *= _bp_dual->msgM(J,i);
+ }
+ }
+ _T.push_back(prod);
+ }
+}
+
+void BBP::RegenerateU() {
+ _U.clear();
+ foreach_iI(_fg) {
+ // Prob prod(_fg->var(i).states(), 1.0);
+ Prob prod(_fg->factor(I).states(), 1.0);
+ foreach(size_t j, _fg->nbF(I)) {
+ if(i!=j) {
+ Prob n_jI(_bp_dual->msgN(j,I));
+ const BP_dual::_ind_t* ind = &(_bp_dual->index(j,I));
+ // multiply prod with n_jI
+ for(size_t x_I = 0; x_I < prod.size(); x_I++)
+ prod[x_I] *= n_jI[(*ind)[x_I]];
+ // prod *= _bp_dual->msgN(j,I);
+ }
+ }
+ _U.push_back(prod);
+ }
+}
+
+void BBP::RegenerateS() {
+ _S.clear();
+ foreach_iIj(_fg) {
+ Factor prod(_fg->factor(I));
+ foreach(size_t k, _fg->nbF(I)) {
+ if(k!=i && k!=j) {
+ if(0) {
+ prod *= Factor(_fg->var(k), _bp_dual->msgN(k,I));
+ } else {
+ const BP_dual::_ind_t& ind = _bp_dual->index(k,I);
+ Prob p(_bp_dual->msgN(k,I));
+ for(size_t xI=0; xI<prod.states(); xI++) {
+ prod.p()[xI] *= p[ind[xI]];
+ }
+ }
+ }
+ }
+ // XXX improve efficiency?
+ // "Marginalize" onto i|j (unnormalized)
+ Prob marg;
+ marg = prod.marginal(VarSet(_fg->var(i)) | VarSet(_fg->var(j)), false).p();
+ _S.push_back(marg);
+ }
+}
+
+void BBP::RegenerateR() {
+ _R.clear();
+ foreach_IiJ(_fg) {
+ Prob prod(_fg->var(i).states(), 1.0);
+ foreach(size_t K, _fg->nbV(i)) {
+ if(K!=I && K!=J) {
+ prod *= _bp_dual->msgM(K,i);
+ }
+ }
+ _R.push_back(prod);
+ }
+}
+
+void BBP::RegenerateInputs() {
+ _adj_b_1_unnorm.clear();
+ for(size_t i=0; i<_fg->nrVars(); i++) {
+ _adj_b_1_unnorm.push_back(unnormAdjoint(_bp_dual->belief1(i).p(), _bp_dual->belief1Z(i), _adj_b_1[i]));
+ }
+ _adj_b_2_unnorm.clear();
+ for(size_t I=0; I<_fg->nrFactors(); I++) {
+ _adj_b_2_unnorm.push_back(unnormAdjoint(_bp_dual->belief2(I).p(), _bp_dual->belief2Z(I), _adj_b_2[I]));
+ }
+}
+
+/// initialise members for factor adjoints (call after RegenerateInputs)
+void BBP::RegeneratePsiAdjoints() {
+ _adj_psi_1.clear();
+ _adj_psi_1.reserve(_fg->nrVars());
+ for(size_t i=0; i<_fg->nrVars(); i++) {
+ Prob p(_adj_b_1_unnorm[i]);
+ assert(p.size()==_fg->var(i).states());
+ foreach(size_t I, _fg->nbV(i)) {
+ p *= _bp_dual->msgM(I,i);
+ }
+ p += _init_adj_psi_1[i];
+ _adj_psi_1.push_back(p);
+ }
+ _adj_psi_2.clear();
+ _adj_psi_2.reserve(_fg->nrFactors());
+ for(size_t I=0; I<_fg->nrFactors(); I++) {
+ Prob p(_adj_b_2_unnorm[I]);
+ assert(p.size()==_fg->factor(I).states());
+ foreach(size_t i, _fg->nbF(I)) {
+ Prob n_iI(_bp_dual->msgN(i,I));
+ const BP_dual::_ind_t* ind = &(_bp_dual->index(i,I));
+      // multiply p with n_iI
+ for(size_t x_I = 0; x_I < p.size(); x_I++)
+ p[x_I] *= n_iI[(*ind)[x_I]];
+ }
+ p += _init_adj_psi_2[I];
+ _adj_psi_2.push_back(p);
+ }
+}
+
+/// initialise members for message adjoints (call after RegenerateInputs)
+void BBP::RegenerateMessageAdjoints() {
+ _adj_n.resize(_fg->nr_edges());
+ _adj_m.resize(_fg->nr_edges());
+ _adj_n_unnorm.resize(_fg->nr_edges());
+ _adj_m_unnorm.resize(_fg->nr_edges());
+ _new_adj_n.clear();
+ _new_adj_n.reserve(_fg->nr_edges());
+ for(size_t iI=0; iI<_fg->nr_edges(); iI++) {
+ size_t i = _fg->edge(iI).first;
+ size_t I = _fg->edge(iI).second;
+ Prob prod(_fg->factor(I).p());
+ prod *= _adj_b_2_unnorm[I];
+ foreach(size_t j, _fg->nbF(I)) {
+ if(j!=i) { // for all j in I\i
+      Prob n_jI(_bp_dual->msgN(j,I));
+ const BP_dual::_ind_t* ind = &(_bp_dual->index(j,I));
+ // multiply prod with n_jI
+ for(size_t x_I = 0; x_I < prod.size(); x_I++)
+ prod[x_I] *= n_jI[(*ind)[x_I]];
+ }
+ }
+ Prob marg( _fg->var(i).states(), 0.0 );
+ // ind is the precalculated Index(i,I) i.e. to x_I == k corresponds x_i == ind[k]
+ const BP_dual::_ind_t* ind = &(_bp_dual->index(i,I));
+ for( size_t r = 0; r < prod.size(); r++ )
+ marg[(*ind)[r]] += prod[r];
+
+ _new_adj_n.push_back(marg);
+ upMsgN(iI); // propagate new _new_adj_n to _adj_n and _adj_n_unnorm
+ }
+ _new_adj_m.clear();
+ _new_adj_m.reserve(_fg->nr_edges());
+ for(size_t iI=0; iI<_fg->nr_edges(); iI++) {
+ size_t i = _fg->edge(iI).first;
+ size_t I = _fg->edge(iI).second;
+ // Prob prod(_fg->var(i).states(),1.0);
+ Prob prod(_adj_b_1_unnorm[i]);
+ assert(prod.size()==_fg->var(i).states());
+ foreach(size_t J, _fg->nbV(i)) {
+ if(J!=I) { // for all J in i\I
+ prod *= _bp_dual->msgM(J,i);
+ }
+ }
+ _new_adj_m.push_back(prod);
+ upMsgM(iI); // propagate new _new_adj_m to _adj_m and _adj_m_unnorm
+ }
+}
+
+void BBP::Regenerate() {
+ RegenerateInds();
+ RegenerateT();
+ RegenerateU();
+ RegenerateS();
+ RegenerateR();
+ RegenerateInputs();
+ RegeneratePsiAdjoints();
+ RegenerateMessageAdjoints();
+ _iters = 0;
+}
+
+void BBP::calcNewN(size_t iI) {
+ size_t i=_fg->edge(iI).first;
+ size_t I=_fg->edge(iI).second;
+ _adj_psi_1[i] += T(i,I)*_adj_n_unnorm[iI];
+ _trip_end_t vfv_iI = _VFV[i][I];
+ Prob &new_adj_n_iI = _new_adj_n[iI];
+ new_adj_n_iI = Prob(_fg->var(i).states(),0.0);
+ foreach(size_t j, _fg->nbF(I)) {
+ if(j!=i) {
+ size_t iIj = vfv_iI[j];
+ Prob &p = _S[iIj];
+ size_t jI = _fg->VV2E(j,I);
+ LOOP_ij(
+ new_adj_n_iI[xi] += p[xij]*_adj_m_unnorm[jI][xj];
+ );
+ }
+ }
+}
+
+void BBP::calcNewM(size_t iI) {
+ size_t i=_fg->edge(iI).first;
+ size_t I=_fg->edge(iI).second;
+
+ Prob p(U(I,i));
+ Prob adj(_adj_m_unnorm[iI]);
+ const BP_dual::_ind_t* ind = &(_bp_dual->index(i,I));
+ for(size_t x_I = 0; x_I < p.size(); x_I++)
+ p[x_I] *= adj[(*ind)[x_I]];
+ _adj_psi_2[I] += p;
+
+ _new_adj_m[iI] = Prob(_fg->var(i).states(),0.0);
+ _trip_end_t fvf_Ii = _FVF[I][i];
+ foreach(size_t J, _fg->nbV(i)) {
+ if(J!=I) {
+ size_t iJ = _fg->VV2E(i,J);
+ _new_adj_m[iI] += _R[fvf_Ii[J]]*_adj_n_unnorm[iJ];
+ }
+ }
+}
+
+void BBP::upMsgM(size_t iI) {
+ _adj_m[iI] = _new_adj_m[iI];
+ _adj_m_unnorm[iI] = unnormAdjoint(_bp_dual->msgM(iI), _bp_dual->zM(iI), _adj_m[iI]);
+}
+
+void BBP::upMsgN(size_t iI) {
+ _adj_n[iI] = _new_adj_n[iI];
+ _adj_n_unnorm[iI] = unnormAdjoint(_bp_dual->msgN(iI), _bp_dual->zN(iI), _adj_n[iI]);
+}
+
+void BBP::doParUpdate() {
+ for(size_t iI=0; iI<_fg->nr_edges(); iI++) {
+ calcNewM(iI);
+ calcNewN(iI);
+ }
+ for(size_t iI=0; iI<_fg->nr_edges(); iI++) {
+ upMsgM(iI);
+ upMsgN(iI);
+ }
+}
+
+Real BBP::getUnMsgMag() {
+ Real s=0.0;
+ for(size_t iI=0; iI<_fg->nr_edges(); iI++) {
+ s += _adj_m_unnorm[iI].sumAbs();
+ s += _adj_n_unnorm[iI].sumAbs();
+ }
+ return s/_fg->nr_edges();
+}
+
+void BBP::getMsgMags(Real &s, Real &new_s) {
+ s=0.0; new_s=0.0;
+ for(size_t iI=0; iI<_fg->nr_edges(); iI++) {
+ s += _adj_m[iI].sumAbs();
+ s += _adj_n[iI].sumAbs();
+ new_s += _new_adj_m[iI].sumAbs();
+ new_s += _new_adj_n[iI].sumAbs();
+ }
+ Real t = _fg->nr_edges();
+ s /= t; new_s /= t;
+}
+
+/// run until change is less than given tolerance
+void BBP::run(Real tol) {
+ size_t minIters = 2;
+ do {
+ _iters++;
+ doParUpdate();
+ } while((_iters < minIters || getUnMsgMag() > tol) && _iters < maxIter());
+ if(_iters==maxIter()) {
+ Real s, new_s;
+ getMsgMags(s,new_s);
+ cerr << "Warning: BBP didn't converge in " << _iters
+ << " iterations (unnorm message magnitude = " << getUnMsgMag()
+ << ", norm message mags = " << s << " -> " << new_s
+ << ")" << endl;
+ }
+}
+
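+/// run Gibbs sampling on fg for the given number of iterations and return
+/// the resulting joint state (one value per variable)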
+vector<size_t> getGibbsState(const BP_dual& fg, size_t iters) {
+ PropertySet gibbsProps;
+ gibbsProps.Set("iters", iters);
+ gibbsProps.Set("verbose", oneLess(fg.Verbose()));
+ Gibbs gibbs(fg, gibbsProps);
+ gibbs.run();
+ return gibbs.state();
+}
+
+void testUnnormAdjoint(int n);
+
+/// given a state for each variable, use numerical derivatives
+/// (multiplying a factor containing a variable by psi_1 adjustments)
+/// to verify accuracy of _adj_psi_1.
+/// 'h' controls size of perturbation.
+/// 'bbpTol' controls tolerance of BBP run.
+double numericBBPTest(const BP_dual& bp_dual, bbp_cfn_t cfn, double bbpTol, double h) {
+ // testUnnormAdjoint(4);
+
+ vector<size_t> state = getGibbsState(bp_dual,2*bp_dual.doneIters());
+ // calculate the value of the unperturbed cost function
+ Real cf0 = gibbsCostFn(bp_dual, cfn, &state);
+
+ // run BBP to estimate adjoints
+ BBP bbp(bp_dual);
+ gibbsInitBBPCostFnAdj(bbp, bp_dual, cfn, &state);
+ bbp.run(bbpTol);
+
+ Real d=0;
+
+ if(1) {
+ // verify bbp.adj_psi_1
+
+ // for each variable i
+ for(size_t i=0; i<bp_dual.nrVars(); i++) {
+ vector<double> adj_est;
+ // for each value xi
+ for(size_t xi=0; xi<bp_dual.var(i).states(); xi++) {
+ // create a copy of bp_dual which multiplies this into some factor containing the variable
+ BP_dual bp_dual_prb(bp_dual);
+ assert(bp_dual_prb.nbV(i).size()>0);
+
+ // create a perturbed psi_1[i] (there is no single-variable
+ // psi in libDAI so we initialize to all 1 and then multiply
+ // into nearest multivariate factor)
+ size_t n = bp_dual.var(i).states();
+ Prob psi_1_prb(n, 1.0);
+ psi_1_prb[xi] += h;
+ psi_1_prb.normalize();
+
+ // update bp_dual_prb
+ size_t I = bp_dual_prb.nbV(i)[0]; // use the first factor in list of neighbours of i
+ bp_dual_prb.factor(I) *= Factor(bp_dual.var(i), psi_1_prb);
+ bp_dual_prb.init(bp_dual.var(i)); // reset messages involving i
+ // bp_dual_prb.init();
+
+ // run the copy to convergence
+ bp_dual_prb.run();
+
+ // calculate the new value of the cost function
+ Real cf_prb = gibbsCostFn(bp_dual_prb, cfn, &state);
+
+ // use this to estimate the adjoint for i
+ // XXX why is it off by a factor of 2?
+ adj_est.push_back((cf_prb-cf0)/h);
+ }
+ Prob p_adj_est(adj_est);
+ // compare this numerical estimate to the BBP estimate; sum the distances
+ cerr << "i: " << i
+ << ", p_adj_est: " << p_adj_est
+ << ", bbp.adj_psi_1(i): " << bbp.adj_psi_1(i) << endl;
+ d += dist(p_adj_est, bbp.adj_psi_1(i), Prob::DISTL1);
+ }
+ }
+ if(1) {
+ // verify bbp.adj_n and bbp.adj_m
+
+    // We actually want to check the responsiveness of the objective
+    // function to changes in the final messages. But at the end of a
+    // BBP run, the message adjoints refer to the initial messages (and
+    // they should be close to zero, see paper). So this resets the BBP
+    // message adjoints so that they refer to the desired final messages
+ bbp.RegenerateMessageAdjoints();
+
+ // for each variable i
+ for(size_t i=0; i<bp_dual.nrVars(); i++) {
+ // for each factor I ~ i
+ foreach(size_t I, bp_dual.nbV(i)) {
+ vector<double> adj_n_est;
+ // for each value xi
+ for(size_t xi=0; xi<bp_dual.var(i).states(); xi++) {
+ BP_dual bp_dual_prb(bp_dual);
+ // make h-sized change to newMsgN
+ bp_dual_prb.newMsgN(i,I)[xi] += h;
+ // recalculate beliefs
+ bp_dual_prb.CalcBeliefs();
+ // get cost function value
+ Real cf_prb = gibbsCostFn(bp_dual_prb, cfn, &state);
+ // add it to list of adjoints
+ adj_n_est.push_back((cf_prb-cf0)/h);
+ }
+
+ vector<double> adj_m_est;
+ // for each value xi
+ for(size_t xi=0; xi<bp_dual.var(i).states(); xi++) {
+ BP_dual bp_dual_prb(bp_dual);
+ // make h-sized change to newMsgM
+ bp_dual_prb.newMsgM(I,i)[xi] += h;
+ // recalculate beliefs
+ bp_dual_prb.CalcBeliefs();
+ // get cost function value
+ Real cf_prb = gibbsCostFn(bp_dual_prb, cfn, &state);
+ // add it to list of adjoints
+ adj_m_est.push_back((cf_prb-cf0)/h);
+ }
+
+ Prob p_adj_n_est(adj_n_est);
+ // compare this numerical estimate to the BBP estimate; sum the distances
+ cerr << "i: " << i << ", I: " << I
+ << ", adj_n_est: " << p_adj_n_est
+ << ", bbp.adj_n(i,I): " << bbp.adj_n(i,I) << endl;
+ d += dist(p_adj_n_est, bbp.adj_n(i,I), Prob::DISTL1);
+
+ Prob p_adj_m_est(adj_m_est);
+ // compare this numerical estimate to the BBP estimate; sum the distances
+ cerr << "i: " << i << ", I: " << I
+ << ", adj_m_est: " << p_adj_m_est
+ << ", bbp.adj_m(I,i): " << bbp.adj_m(I,i) << endl;
+ d += dist(p_adj_m_est, bbp.adj_m(I,i), Prob::DISTL1);
+ }
+ }
+ }
+ if(0) {
+ // verify bbp.adj_b_1
+ for(size_t i=0; i<bp_dual.nrVars(); i++) {
+ vector<double> adj_b_1_est;
+ // for each value xi
+ for(size_t xi=0; xi<bp_dual.var(i).states(); xi++) {
+ BP_dual bp_dual_prb(bp_dual);
+
+ // make h-sized change to b_1(i)[x_i]
+ bp_dual_prb._beliefs.b1[i][xi] += h;
+
+ // get cost function value
+ Real cf_prb = gibbsCostFn(bp_dual_prb, cfn, &state);
+
+ // add it to list of adjoints
+ adj_b_1_est.push_back((cf_prb-cf0)/h);
+ }
+ Prob p_adj_b_1_est(adj_b_1_est);
+ // compare this numerical estimate to the BBP estimate; sum the distances
+ cerr << "i: " << i
+ << ", adj_b_1_est: " << p_adj_b_1_est
+ << ", bbp.adj_b_1(i): " << bbp.adj_b_1(i) << endl;
+ d += dist(p_adj_b_1_est, bbp.adj_b_1(i), Prob::DISTL1);
+ }
+ }
+
+ // return total of distances
+ return d;
+}
+
+void testUnnormAdjoint(int n) {
+ double h = 1.0e-5;
+ Prob q_unnorm(n);
+ // create a random q_unnorm
+ for(int i=0; i<n; i++) {
+ q_unnorm[i] = exp(4*rnd_stdnormal());
+ }
+ // normalize it to get q and Z_q
+ Prob q(q_unnorm);
+ double Z_q = q.normalize();
+ // cost function V just selects one (random) element of q
+ int vk = rnd_multi(n);
+ double V = q[vk];
+ // calculate adj_q for this V
+ Prob adj_q(n, 0.0);
+ adj_q[vk] = 1;
+ // use unnormAdjoint to get adj_q_unnorm
+ Prob adj_q_unnorm = unnormAdjoint(q, Z_q, adj_q);
+ // with perturbations of q_unnorm, test that adj_q_unnorm is correct
+ Prob adj_q_unnorm_numeric(n, 0.0);
+ for(int i=0; i<n; i++) {
+ Prob q_unnorm_prb = q_unnorm;
+ q_unnorm_prb[i] += h;
+ Prob q_prb(q_unnorm_prb);
+ q_prb.normalize();
+ DAI_PV(q_unnorm_prb);
+ DAI_PV(q_prb);
+ double V_prb = q_prb[vk];
+ adj_q_unnorm_numeric[i] = (V_prb-V)/h;
+ }
+ DAI_PV(q_unnorm);
+ DAI_PV(q);
+ DAI_PV(Z_q);
+ DAI_PV(vk);
+ DAI_PV(V);
+ DAI_PV(adj_q_unnorm);
+ DAI_PV(adj_q_unnorm_numeric);
+}
+
+void makeBBPGraph(const BP_dual& bp_dual, bbp_cfn_t cfn) {
+ double tiny=1.0e-7;
+ vector<size_t> state = getGibbsState(bp_dual,2*bp_dual.doneIters());
+ const char *graphs_dir = "./bbp-data";
+ mkdir(graphs_dir, -1);
+ size_t num_samples=500;
+ for(size_t i=0; i<bp_dual.nrVars(); i++) {
+ for(size_t xi=0; xi<bp_dual.var(i).states(); xi++) {
+ if(bp_dual.belief1(i)[xi]<tiny || abs(bp_dual.belief1(i)[xi]-1)<tiny)
+ continue;
+ char *fn;
+ asprintf(&fn,"%s/i:%d-xi:%d.out",graphs_dir,(int)i,(int)xi);
+ ofstream os(fn, ios_base::out|ios_base::trunc);
+
+ for(size_t n=0; n<num_samples; n++) {
+ double c=((double)n)/(num_samples-1);
+ Prob psi_1_prb(bp_dual.var(i).states(),1.0-c);
+ psi_1_prb[xi] = 1.0;
+
+ BP_dual bp_dual_prb(bp_dual);
+ assert(bp_dual_prb.nbV(i).size()>0);
+ size_t I = bp_dual_prb.nbV(i)[0]; // use the first factor in list of neighbours of i
+ bp_dual_prb.factor(I) *= Factor(bp_dual.var(i), psi_1_prb);
+ bp_dual_prb.init(bp_dual.var(i)); // reset messages involving i
+
+ // run the copy to convergence
+ bp_dual_prb.run();
+
+ Real cf = gibbsCostFn(bp_dual_prb, cfn, &state);
+
+ os << n << "\t" << cf << endl;
+ }
+
+ os.close();
+ free(fn);
+ }
+ }
+}
+
+} // end of namespace dai
--- /dev/null
+
+#include <iostream>
+#include <sstream>
+#include <map>
+#include <set>
+#include <algorithm>
+
+#include <dai/bbp.h>
+//#include <dai/diffs.h>
+#include <dai/util.h>
+//#include "stlutil.h"
+#include <dai/properties.h>
+
+namespace dai {
+
+using namespace std;
+
+const char *BP_dual::Name = "BP_dual";
+
+const char *BP_dual::PropertyList[] = {"tol","maxiter","updates","verbose"};
+
+void BP_dual::setProperties( const PropertySet &opts ) {
+// DAI_DMSG("in BP_dual::setProperties");
+// DAI_PV(opts);
+
+ bool die=false;
+ foreach(const char *p, PropertyList) {
+ if( !opts.hasKey(p) ) {
+ cerr << "BP_dual: missing property " << p << endl;
+ die=true;
+ }
+ }
+ if(die) throw "BP_dual: Couldn't set properties";
+
+ props.tol = opts.getStringAs<double>("tol");
+ props.maxiter = opts.getStringAs<size_t>("maxiter");
+ props.updates = opts.getStringAs<Properties::UpdateType>("updates");
+ props.verbose = opts.getStringAs<size_t>("verbose");
+
+// DAI_PV(printProperties());
+}
+
+PropertySet BP_dual::getProperties() const {
+ PropertySet opts;
+ opts.Set( "tol", props.tol );
+ opts.Set( "maxiter", props.maxiter );
+ opts.Set( "updates", props.updates );
+ opts.Set( "verbose", props.verbose );
+ return opts;
+}
+
+std::string BP_dual::printProperties() const {
+ stringstream s( stringstream::out );
+ s << "[";
+ s << "tol=" << props.tol << ",";
+ s << "maxiter=" << props.maxiter << ",";
+ s << "updates=" << props.updates << ",";
+ s << "verbose=" << props.verbose;
+ s << "]";
+ return s.str();
+}
+
+// void BP_dual::checkProperties() {
+// const char *props[] = {"updates","tol","maxiter","verbose"};
+// for(size_t i=0; i<sizeof(props)/sizeof(*props); i++) {
+// if(!HasProperty(props[i]))
+// die("BP_dual: Missing property \"%s\"", props[i]);
+// }
+
+// ConvertPropertyTo<double>("tol");
+// ConvertPropertyTo<size_t>("maxiter");
+// ConvertPropertyTo<size_t>("verbose");
+// ConvertPropertyTo<UpdateType>("updates");
+// }
+
+void BP_dual::RegenerateIndices() {
+ _indices.clear();
+ _indices.reserve(nr_edges());
+
+ foreach(Edge iI, edges()) {
+ vector<size_t> ind( factor(iI.second).states(), 0 );
+ IndexFor i (var(iI.first), factor(iI.second).vars() );
+ for( size_t j = 0; i >= 0; ++i,++j )
+ ind[j] = i;
+ _indices.push_back( ind );
+ }
+}
+
+void BP_dual::RegenerateMessages() {
+ _msgs.Zn.resize(nr_edges(),1.0);
+ _msgs.Zm.resize(nr_edges(),1.0);
+
+ // clear messages
+ _msgs.m.clear();
+ _msgs.m.reserve(nr_edges());
+ _msgs.n.clear();
+ _msgs.n.reserve(nr_edges());
+
+ // create messages and indices
+ foreach(Edge iI, edges()) {
+ // initialize to uniform distributions
+ _msgs.m.push_back( Prob( var(iI.first).states() ) );
+ _msgs.n.push_back( Prob( var(iI.first).states() ) );
+ }
+
+ // create new_messages
+ _new_msgs = _msgs;
+}
+
+void BP_dual::RegenerateBeliefs() {
+ _beliefs.b1.clear();
+ _beliefs.b1.reserve(nrVars());
+ _beliefs.Zb1.resize(nrVars(), 1.0);
+ _beliefs.b2.clear();
+ _beliefs.b2.reserve(nrFactors());
+ _beliefs.Zb2.resize(nrFactors(), 1.0);
+
+ for(size_t i=0; i<nrVars(); i++) {
+ _beliefs.b1.push_back( Prob( var(i).states() ).setUniform() );
+ }
+ for(size_t I=0; I<nrFactors(); I++) {
+ _beliefs.b2.push_back( Prob( factor(I).states() ).setUniform() );
+ }
+}
+
+// called by constructor, called before 'init'
+void BP_dual::Regenerate() {
+
+ indexEdges(); // so we can use compatibility interface
+
+// DAIAlgFG::Regenerate(); // located in BipartiteGraph
+
+ RegenerateIndices();
+ RegenerateMessages();
+ RegenerateBeliefs();
+
+ _maxdiff = 0;
+ _iters = 0;
+}
+
+void BP_dual::CalcBelief1(size_t i) {
+ Prob prod( var(i).states(), 1.0 );
+ foreach(size_t I, nbV(i)) {
+ prod *= newMsgM(I,i);
+ }
+ _beliefs.Zb1[i] = prod.normalize();
+ _beliefs.b1[i] = prod;
+}
+
+void BP_dual::CalcBelief2(size_t I) {
+ Prob prod( factor(I).p() );
+ foreach(size_t j, nbF(I)) {
+ const _ind_t *ind = &(index(j, I));
+ for(size_t r=0; r<prod.size(); r++) {
+ Prob n(newMsgN(j,I));
+ prod[r] *= n[(*ind)[r]];
+ }
+ }
+ _beliefs.Zb2[I] = prod.normalize();
+ _beliefs.b2[I] = prod;
+}
+
+// called after run()
+void BP_dual::CalcBeliefs() {
+ for(size_t i=0; i<nrVars(); i++) {
+ // calculate b_i
+ CalcBelief1(i);
+ }
+ for(size_t I=0; I<nrFactors(); I++) {
+ // calculate b_I
+ CalcBelief2(I);
+ }
+}
+
+void BP_dual::calcNewM(size_t iI) {
+ // calculate updated message I->i
+ size_t i = edge(iI).first;
+ size_t I = edge(iI).second;
+
+ Prob prod( factor(I).p() );
+
+ foreach(size_t j, nbF(I)) {
+ if( j != i ) { // for all j in I \ i
+ _ind_t* ind = &(index(j,I));
+ Prob n(msgN(j,I));
+ for( size_t r = 0; r < prod.size(); r++ )
+ prod[r] *= n[(*ind)[r]];
+ }
+ }
+
+ // Marginalize onto i
+ Prob marg( var(i).states(), 0.0 );
+ // ind is the precalculated Index(i,I) i.e. to x_I == k corresponds x_i == ind[k]
+ _ind_t* ind = &(index(i,I));
+ for( size_t r = 0; r < prod.size(); r++ )
+ marg[(*ind)[r]] += prod[r];
+
+ _new_msgs.Zm[iI] = marg.normalize();
+ _new_msgs.m[iI] = marg;
+}
+
+void BP_dual::calcNewN(size_t iI) {
+ // XXX optimize
+ // calculate updated message i->I
+ size_t i = edge(iI).first;
+ size_t I = edge(iI).second;
+
+ Prob prod(var(i).states(), 1.0);
+ foreach(size_t J, nbV(i)) {
+ if(J != I) { // for all J in i \ I
+ prod *= msgM(J,i);
+ }
+ }
+ _new_msgs.Zn[iI] = prod.normalize();
+ _new_msgs.n[iI] = prod;
+}
+
+void BP_dual::upMsgM(size_t iI) {
+ _msgs.m[iI] = _new_msgs.m[iI];
+ _msgs.Zm[iI] = _new_msgs.Zm[iI];
+}
+
+void BP_dual::upMsgN(size_t iI) {
+ _msgs.n[iI] = _new_msgs.n[iI];
+ _msgs.Zn[iI] = _new_msgs.Zn[iI];
+}
+
+double BP_dual::run() {
+ DAI_IFVERB(1, "Starting " << identify() << "..." << endl);
+
+ double tic = toc();
+ // for some reason we need 2* here, where orig BP doesn't
+ Diffs diffs(2*nrVars(), 1.0);
+
+ vector<size_t> edge_seq;
+ vector<double> residuals;
+
+ vector<Factor> old_beliefs;
+ old_beliefs.reserve( nrVars() );
+ for( size_t i = 0; i < nrVars(); i++ ) {
+ CalcBelief1(i);
+ old_beliefs.push_back(belief1(i));
+ }
+
+ size_t iter = 0;
+
+ if( Updates() == UpdateType::SEQMAX ) {
+ // do the first pass
+ for(size_t iI = 0; iI < nr_edges(); iI++ ) {
+ calcNewM(iI);
+ calcNewN(iI);
+ }
+
+ // calculate initial residuals
+ residuals.reserve(nr_edges());
+ for( size_t iI = 0; iI < nr_edges(); iI++ )
+ residuals.push_back( dist( _new_msgs.m[iI], _msgs.m[iI], Prob::DISTLINF ) );
+ } else {
+ edge_seq.reserve( nr_edges() );
+ for( size_t i = 0; i < nr_edges(); i++ )
+ edge_seq.push_back( i );
+ }
+
+ // do several passes over the network until maximum number of iterations has
+ // been reached or until the maximum belief difference is smaller than tolerance
+ for( iter=0; iter < props.maxiter && diffs.maxDiff() > props.tol; iter++ ) {
+ if( Updates() == UpdateType::SEQMAX ) {
+ // Residuals-BP by Koller et al.
+ for( size_t t = 0; t < nr_edges(); t++ ) {
+ // update the message with the largest residual
+ size_t iI = max_element(residuals.begin(), residuals.end()) - residuals.begin();
+ upMsgM(iI);
+ residuals[iI] = 0;
+
+ // I->i has been updated, which means that residuals for all
+ // J->j with J in nb[i]\I and j in nb[J]\i have to be updated
+ size_t i = edge(iI).first;
+ size_t I = edge(iI).second;
+ foreach(size_t J, nbV(i)) {
+ if(J != I) {
+ size_t iJ = VV2E(i,J);
+ calcNewN(iJ);
+ upMsgN(iJ);
+ foreach(size_t j, nbF(J)) {
+ if(j != i) {
+ size_t jJ = VV2E(j,J);
+ calcNewM(jJ);
+ residuals[jJ] = dist( _new_msgs.m[jJ], _msgs.m[jJ], Prob::DISTLINF );
+ }
+ }
+ }
+ }
+ }
+ } else if( Updates() == UpdateType::PARALL ) {
+ // Parallel updates
+ for( size_t t = 0; t < nr_edges(); t++ ) {
+ calcNewM(t);
+ calcNewN(t);
+ }
+ if(0) {
+ for(size_t t=0; t<nr_edges(); t++) {
+ upMsgM(t); upMsgN(t);
+ }
+ } else {
+ _msgs = _new_msgs;
+ }
+ } else {
+ // Sequential updates
+ if( Updates() == UpdateType::SEQRND )
+ random_shuffle( edge_seq.begin(), edge_seq.end() );
+
+ foreach(size_t k, edge_seq) {
+ calcNewM(k);
+ calcNewN(k);
+ upMsgM(k);
+ upMsgN(k);
+ }
+ }
+
+ // calculate new beliefs and compare with old ones
+ for( size_t i = 0; i < nrVars(); i++ ) {
+ CalcBelief1(i);
+ Factor nb( belief1(i) );
+ diffs.push( dist( nb, old_beliefs[i], Prob::DISTLINF ) );
+ old_beliefs[i] = nb;
+ }
+
+ DAI_IFVERB(3,"BP_dual::run: maxdiff " << diffs.maxDiff() << " after " << iter+1 << " passes" << endl);
+
+ _iters++;
+ }
+
+ updateMaxDiff( diffs.maxDiff() );
+
+ if( props.verbose >= 1 ) {
+ if( diffs.maxDiff() > props.tol ) {
+ DAI_IFVERB(1, endl << "BP_dual::run: WARNING: not converged within " << props.maxiter << " passes (" << toc() - tic << " clocks)...final maxdiff:" << diffs.maxDiff() << endl);
+ } else {
+ DAI_IFVERB(3, "BP_dual::run: converged in " << iter << " passes (" << toc() - tic << " clocks)." << endl);
+ }
+ }
+
+ CalcBeliefs();
+
+ return diffs.maxDiff();
+}
+
+string BP_dual::identify() const {
+ return string(Name) + printProperties();
+}
+
+vector<Factor> BP_dual::beliefs() const {
+ vector<Factor> result;
+ for( size_t i = 0; i < nrVars(); i++ )
+ result.push_back( belief1(i) );
+ for( size_t I = 0; I < nrFactors(); I++ )
+ result.push_back( belief2(I) );
+ return result;
+}
+
+Factor BP_dual::belief( const VarSet &ns ) const {
+ if( ns.size() == 1 )
+ return belief( *(ns.begin()) );
+ else {
+ size_t I;
+ for( I = 0; I < nrFactors(); I++ )
+ if( factor(I).vars() >> ns )
+ break;
+ assert( I != nrFactors() );
+ return belief2(I).marginal(ns);
+ }
+}
+
+Real BP_dual::logZ() const {
+ Real sum = 0.0;
+ for(size_t i = 0; i < nrVars(); i++ )
+ sum += Real(1.0 - nbV(i).size()) * belief1(i).entropy();
+ for( size_t I = 0; I < nrFactors(); I++ )
+ sum -= dist( belief2(I), factor(I), Prob::DISTKL );
+ return sum;
+}
+
+// reset only messages to/from certain variables
+void BP_dual::init(const VarSet &ns) {
+ _iters=0;
+ foreach(Var n, ns) {
+ size_t ni = findVar(n);
+ size_t st = n.states();
+ foreach(Neighbor I, nbV(ni)) {
+ msgM(I.node,ni).fill(1.0/st);
+ zM(I.node,ni) = 1.0;
+ msgN(ni,I.node).fill(1.0/st);
+ zN(ni,I.node) = 1.0;
+ }
+ }
+}
+
+void BP_dual::init() {
+ _iters=0;
+ for(size_t iI = 0; iI < nr_edges(); iI++ ) {
+ _msgs.m[iI].setUniform();
+ _msgs.Zm[iI] = 1;
+ _msgs.n[iI].setUniform();
+ _msgs.Zn[iI] = 1;
+ }
+ _new_msgs = _msgs;
+}
+
+
+void BP_dual::init(const vector<size_t>& state) {
+ _iters=0;
+ for(size_t iI = 0; iI < nr_edges(); iI++ ) {
+ size_t i = edge(iI).first;
+ _msgs.m[iI].fill(0.1);
+ _msgs.m[iI][state[i]]=1;
+ _msgs.Zm[iI] = _msgs.m[iI].normalize();
+ _msgs.n[iI].fill(0.1);
+ _msgs.n[iI][state[i]]=1;
+ _msgs.Zn[iI] = _msgs.n[iI].normalize();
+ }
+ _new_msgs = _msgs;
+}
+
+void _clamp(FactorGraph &g, const Var & n, const vector<size_t> &is ) {
+ Factor mask_n(n,0.0);
+
+  foreach(size_t i, is) { assert( i < n.states() ); mask_n[i] = 1.0; }
+
+ for( size_t I = 0; I < g.nrFactors(); I++ )
+ if( g.factor(I).vars() && n )
+ g.factor(I) *= mask_n;
+}
+
+// clamp a factor to have one of a set of values
+void _clampFactor(FactorGraph &g, size_t I, const vector<size_t> &is) {
+ size_t st = g.factor(I).states();
+ Prob mask_n(st,0.0);
+
+  foreach(size_t i, is) { assert( i < st ); mask_n[i] = 1.0; }
+
+ g.factor(I).p() *= mask_n;
+}
+
+} // end of namespace dai
--- /dev/null
+#include <iostream>
+#include <sstream>
+#include <map>
+#include <set>
+#include <algorithm>
+
+#include <dai/util.h>
+#include <dai/properties.h>
+
+#include <dai/bp.h>
+#include <dai/cbp.h>
+#include <dai/bbp.h>
+
+using namespace std;
+
+namespace dai {
+
+const char *CBP::Name = "CBP";
+
+const char *CBP::PropertyList[] = {"updates","tol","rec_tol","maxiter","verbose","max_levels","min_max_adj","choose","clamp","recursion","bbp_cfn","rand_seed"};
+
+#define rnd_multi(x) rnd_int(0,(x)-1)
+
+void CBP::setProperties(const PropertySet &opts) {
+// DAI_DMSG("in CBP::setProperties");
+// DAI_PV(opts);
+ foreach(const char* p, PropertyList) {
+ if(!opts.hasKey(p)) {
+      // XXX throwing c_str() of a temporary string yields a dangling pointer; should throw a proper exception object
+ throw (string("CBP: Missing property ")+p).c_str();
+ }
+ }
+
+ props.tol = opts.getStringAs<double>("tol");
+ props.rec_tol = opts.getStringAs<double>("rec_tol");
+ props.maxiter = opts.getStringAs<size_t>("maxiter");
+ props.verbose = opts.getStringAs<size_t>("verbose");
+ props.updates = opts.getStringAs<Properties::UpdateType>("updates");
+ props.max_levels = opts.getStringAs<size_t>("max_levels");
+ props.min_max_adj = opts.getStringAs<double>("min_max_adj");
+ props.choose = opts.getStringAs<Properties::ChooseMethodType>("choose");
+ props.recursion = opts.getStringAs<Properties::RecurseType>("recursion");
+ props.clamp = opts.getStringAs<Properties::ClampType>("clamp");
+ props.bbp_cfn = opts.getStringAs<bbp_cfn_t>("bbp_cfn");
+ props.rand_seed = opts.getStringAs<size_t>("rand_seed");
+}
+
+PropertySet CBP::getProperties() const {
+ PropertySet opts;
+ opts.Set( "tol", props.tol );
+ opts.Set( "rec_tol", props.rec_tol );
+ opts.Set( "maxiter", props.maxiter );
+ opts.Set( "verbose", props.verbose );
+ opts.Set( "updates", props.updates );
+ opts.Set( "max_levels", props.max_levels );
+ opts.Set( "min_max_adj", props.min_max_adj );
+ opts.Set( "choose", props.choose );
+ opts.Set( "recursion", props.recursion );
+ opts.Set( "clamp", props.clamp );
+ opts.Set( "bbp_cfn", props.bbp_cfn );
+ opts.Set( "rand_seed", props.rand_seed );
+ return opts;
+}
+
+std::string CBP::printProperties() const {
+ stringstream s( stringstream::out );
+ s << "[";
+ s << "tol=" << props.tol << ",";
+ s << "rec_tol=" << props.rec_tol << ",";
+ s << "maxiter=" << props.maxiter << ",";
+ s << "verbose=" << props.verbose << ",";
+ s << "updates=" << props.updates << ",";
+ s << "max_levels=" << props.max_levels << ",";
+ s << "min_max_adj=" << props.min_max_adj << ",";
+ s << "choose=" << props.choose << ",";
+ s << "recursion=" << props.recursion << ",";
+ s << "clamp=" << props.clamp << ",";
+ s << "bbp_cfn=" << props.bbp_cfn << ",";
+  s << "rand_seed=" << props.rand_seed;
+ s << "]";
+ return s.str();
+}
+
+void CBP::construct() {
+// DAIAlgFG::Regenerate();
+ indexEdges();
+
+ _beliefs1.clear(); _beliefs1.reserve(nrVars());
+ for( size_t i = 0; i < nrVars(); i++ )
+ _beliefs1.push_back( Factor(var(i)).normalized() );
+
+ _beliefs2.clear(); _beliefs2.reserve(nrFactors());
+ for( size_t I = 0; I < nrFactors(); I++ ) {
+ Factor f = factor(I);
+ f.fill(1); f.normalize();
+ _beliefs2.push_back(f);
+ }
+
+ // to compute average level
+ _sum_level = 0;
+ _num_leaves = 0;
+
+ _maxdiff = 0;
+ _iters = 0;
+}
+
+static
+vector<Factor> mixBeliefs(Real p, vector<Factor> b, vector<Factor> c) {
+ vector<Factor> out;
+ assert(b.size()==c.size());
+ out.reserve(b.size());
+ Real pc = 1-p;
+ for(size_t i=0; i<b.size(); i++) {
+ // XXX probably already normalized
+ out.push_back(b[i].normalized()*p+
+ c[i].normalized()*pc);
+ }
+ return out;
+}
+
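+// Main entry point: optionally seed the RNG, run BP_dual on the unclamped
+// graph to obtain the initial beliefs and logZ, then recursively clamp
+// variables (or factors) and combine the conditioned estimates.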
+double CBP::run() {
+// BP bp(getBP());
+// InfAlg *bp = newInfAlg( GetPropertyAs<string>("bp_alg"), *this, GetPropertyAs<Properties>("bp_opts") );
+ size_t seed = props.rand_seed;
+ if(seed>0) rnd_seed(seed);
+
+ BP_dual bp_dual(getBP_dual());
+ bp_dual.init();
+ bp_dual.run();
+ _iters += bp_dual.Iterations();
+
+ vector<Factor> beliefs_out;
+ Real lz_out;
+ runRecurse(bp_dual, bp_dual.logZ(),
+ vector<size_t>(0), set<size_t>(),
+ _num_leaves, _sum_level,
+ lz_out, beliefs_out);
+ if(props.verbose>=1) {
+ cerr << "CBP average levels = " << (_sum_level/_num_leaves) << ", leaves = " << _num_leaves << endl;
+ }
+ setBeliefs(beliefs_out, lz_out);
+ return 0;
+}
+
+BP CBP::getBP() {
+ PropertySet bpProps;
+ bpProps.Set("updates", string("PARALL"));
+ bpProps.Set("tol", props.tol);
+ bpProps.Set("maxiter", props.maxiter);
+ bpProps.Set("verbose", oneLess(props.verbose));
+ BP bp(*this,bpProps);
+ bp.init();
+ return bp;
+}
+
+BP_dual CBP::getBP_dual() {
+ PropertySet bpProps;
+ bpProps.Set("updates", string("PARALL"));
+ bpProps.Set("tol", props.tol);
+ bpProps.Set("maxiter", props.maxiter);
+ bpProps.Set("verbose", oneLess(props.verbose));
+// cerr << "In getBP_dual" << endl;
+// DAI_PV(bpProps);
+ BP_dual bp_dual(*this,bpProps);
+ return bp_dual;
+}
+
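+// Returns the sorted list of states in {0,...,n_states-1} that do not occur
+// in 'xis' (which must be sorted); e.g. complement({0,2}, 4) == {1,3}.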
+vector<size_t> complement(vector<size_t>& xis, size_t n_states) {
+ vector<size_t> cmp_xis(0);
+ size_t j=0;
+ for(size_t xi=0; xi<n_states; xi++) {
+ while(j<xis.size() && xis[j]<xi) j++;
+ if(j>=xis.size() || xis[j]>xi) cmp_xis.push_back(xi);
+ }
+ assert( xis.size()+cmp_xis.size() == n_states );
+ return cmp_xis;
+}
+
+Real max(Real x,Real y) { return x>y?x:y; }
+
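+// Given two log-partition-sum estimates lz and cmp_lz, returns the mixing
+// weight p = exp(lz) / (exp(lz) + exp(cmp_lz)), computed by first subtracting
+// the maximum of the two for numerical stability.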
+Real unSoftMax(Real lz, Real cmp_lz) {
+ double m = max(lz, cmp_lz);
+ lz -= m; cmp_lz -= m;
+ double p = exp(lz)/(exp(lz)+exp(cmp_lz));
+ return p;
+}
+
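+// Stable computation of log( exp(lz) + exp(cmp_lz) ).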
+Real logSumExp(Real lz, Real cmp_lz) {
+ double m = max(lz, cmp_lz);
+ lz -= m; cmp_lz -= m;
+ return m+log(exp(lz)+exp(cmp_lz));
+}
+
+Real dist(const vector<Factor>& b1, const vector<Factor>& b2, size_t nv) {
+ Real d=0.0;
+ for(size_t k=0; k<nv; k++) {
+ d += dist( b1[k], b2[k], Prob::DISTLINF );
+ }
+ return d;
+}
+
+
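+// One step of the clamping recursion: choose a variable (or factor) and a set
+// of states 'xis', run BP_dual with the chosen states clamped and with their
+// complement clamped, and combine the two conditioned estimates. The mixing
+// weight p and the combined logZ follow from the two partition sums:
+//   p = Z_xis / (Z_xis + Z_cmp)        (unSoftMax of the two logZ's)
+//   logZ_out = log( Z_xis + Z_cmp )    (logSumExp of the two logZ's)
+// Recursion stops when no clamp candidate is found, or when the estimated
+// change falls below rec_tol (REC_LOGZ / REC_BDIFF criteria).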
+void CBP::runRecurse(BP_dual &bp_dual,
+ double orig_logZ,
+ vector<size_t> clamped_vars_list,
+ set<size_t> clamped_vars,
+ size_t &num_leaves,
+ double &sum_level,
+ Real &lz_out, vector<Factor>& beliefs_out) {
+ // choose a variable/states to clamp:
+ size_t i;
+ vector<size_t> xis;
+ Real maxVar=0.0;
+ bool found;
+ bool clampVar = (Clamping()==Properties::ClampType::CLAMP_VAR);
+
+ // XXX fix to just pass orig_logZ
+
+ if(Recursion()==Properties::RecurseType::REC_LOGZ && recTol()>0 &&
+ exp(bp_dual.logZ()-orig_logZ) < recTol()) {
+ found = false;
+ } else {
+ found = chooseNextClampVar(bp_dual,
+ clamped_vars_list,
+ clamped_vars,
+ i, xis, &maxVar);
+ }
+
+ if(!found) {
+ num_leaves++;
+ sum_level += clamped_vars_list.size();
+ beliefs_out = bp_dual.beliefs();
+ lz_out = bp_dual.logZ();
+ return;
+ }
+
+ if(clampVar) {
+ foreach(size_t xi, xis) { assert(/*0<=xi &&*/ xi<var(i).states()); }
+ } else {
+ foreach(size_t xI, xis) { assert(/*0<=xI &&*/ xI<factor(i).states()); }
+ }
+ // - otherwise, clamp and recurse, saving margin estimates for each
+ // clamp setting. afterwards, combine estimates.
+
+ // compute complement of 'xis'
+ vector<size_t> cmp_xis=complement(xis, clampVar?var(i).states():factor(i).states());
+
+ // XXX could do this more efficiently with a nesting version of
+ // saveProbs/undoProbs
+ Real lz; vector<Factor> b;
+ BP_dual bp_dual_c(bp_dual);
+ if(clampVar) {
+ _clamp((FactorGraph&)bp_dual_c, var(i), xis);
+ bp_dual_c.init(var(i));
+ } else {
+ _clampFactor((FactorGraph&)bp_dual_c, i, xis);
+ bp_dual_c.init(factor(i).vars());
+ }
+ bp_dual_c.run();
+ _iters += bp_dual_c.Iterations();
+
+ lz = bp_dual_c.logZ();
+ b = bp_dual_c.beliefs();
+
+ Real cmp_lz; vector<Factor> cmp_b;
+ BP_dual cmp_bp_dual_c(bp_dual);
+ if(clampVar) {
+ _clamp(cmp_bp_dual_c,var(i),cmp_xis);
+ cmp_bp_dual_c.init(var(i));
+ } else {
+ _clampFactor(cmp_bp_dual_c,i,cmp_xis);
+ cmp_bp_dual_c.init(factor(i).vars());
+ }
+ cmp_bp_dual_c.run();
+ _iters += cmp_bp_dual_c.Iterations();
+
+ cmp_lz = cmp_bp_dual_c.logZ();
+ cmp_b = cmp_bp_dual_c.beliefs();
+
+ double p = unSoftMax(lz, cmp_lz);
+ Real bp__d=0.0;
+
+ if(Recursion()==Properties::RecurseType::REC_BDIFF && recTol() > 0) {
+ vector<Factor> combined_b(mixBeliefs(p,b,cmp_b));
+ Real new_lz = logSumExp(lz,cmp_lz);
+ bp__d = dist(bp_dual.beliefs(),combined_b,nrVars());
+ if(exp(new_lz-orig_logZ)*bp__d < recTol()) {
+ num_leaves++;
+ sum_level += clamped_vars_list.size();
+ beliefs_out = combined_b;
+ lz_out = new_lz;
+ return;
+ }
+ }
+
+ // either we are not doing REC_BDIFF or the distance was large
+ // enough to recurse:
+
+ runRecurse(bp_dual_c, orig_logZ,
+ clamped_vars_list,
+ clamped_vars,
+ num_leaves, sum_level, lz, b);
+ runRecurse(cmp_bp_dual_c, orig_logZ,
+ clamped_vars_list,
+ clamped_vars,
+ num_leaves, sum_level, cmp_lz, cmp_b);
+
+ p = unSoftMax(lz,cmp_lz);
+
+ beliefs_out = mixBeliefs(p, b, cmp_b);
+ lz_out = logSumExp(lz,cmp_lz);
+
+ if(props.verbose>=2) {
+ Real d = dist( bp_dual.beliefs(), beliefs_out, nrVars() );
+ cerr << "Distance (clamping " << i << "): " << d;
+ if(Recursion()==Properties::RecurseType::REC_BDIFF)
+ cerr << "; bp_dual predicted " << bp__d;
+ cerr << "; max adjoint = " << maxVar << "; level = " << clamped_vars_list.size() << endl;
+ }
+}
+
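+// Selects the next variable (or factor) and state(s) to clamp, according to
+// the 'choose' property: CHOOSE_RANDOM picks a sufficiently uncertain
+// variable/factor and one of its probable states at random; CHOOSE_BP_ALL
+// clamps every candidate in turn and keeps the one whose clamping moves the
+// single-variable beliefs the most; CHOOSE_BBP uses the BBP adjoints (see
+// bbpFindClampVar below). Returns false if nothing suitable is found or the
+// maximum clamping level has been reached.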
+// 'xis' must be sorted
+bool CBP::chooseNextClampVar(BP_dual& bp,
+ vector<size_t> &clamped_vars_list,
+ set<size_t> &clamped_vars,
+ size_t &i, vector<size_t> &xis, Real *maxVarOut) {
+ Real tiny=1.0e-14;
+ if(props.verbose>=3) {
+ cerr << "clamped_vars_list" << clamped_vars_list << endl;
+ }
+ if(clamped_vars_list.size() >= maxClampLevel()) {
+ return false;
+ }
+ if(ChooseMethod()==Properties::ChooseMethodType::CHOOSE_RANDOM) {
+ if(Clamping()==Properties::ClampType::CLAMP_VAR) {
+ int t=0, t1=100;
+ do {
+ i = rnd_multi(nrVars());
+ t++;
+ } while(fabs(bp.belief1(i).p().max()-1) < tiny &&
+ t < t1);
+ if(t==t1) {
+ return false;
+ // die("Too many levels requested in CBP");
+ }
+ // only pick probable values for variable
+ size_t xi;
+ do {
+ xi = rnd_multi(var(i).states());
+ t++;
+ } while(bp.belief1(i).p()[xi] < tiny && t<t1);
+ assert(t<t1);
+ xis.resize(1, xi);
+ // assert(!_clamped_vars.count(i)); // not true for >2-ary variables
+ DAI_IFVERB(2, endl<<"CHOOSE_RANDOM chose variable "<<i<<" state "<<xis[0]<<endl);
+ } else {
+ int t=0, t1=100;
+ do {
+ i = rnd_multi(nrFactors());
+ t++;
+ } while(fabs(bp.belief2(i).p().max()-1) < tiny &&
+ t < t1);
+ if(t==t1) {
+ return false;
+ // die("Too many levels requested in CBP");
+ }
+ // only pick probable states of the factor
+ size_t xi;
+ do {
+ xi = rnd_multi(factor(i).states());
+ t++;
+ } while(bp.belief2(i).p()[xi] < tiny && t<t1);
+ assert(t<t1);
+ xis.resize(1, xi);
+ // assert(!_clamped_vars.count(i)); // not true for >2-ary variables
+ DAI_IFVERB(2, endl<<"CHOOSE_RANDOM chose factor "<<i<<" state "<<xis[0]<<endl);
+ }
+ } else if(ChooseMethod()==Properties::ChooseMethodType::CHOOSE_BP_ALL) {
+ // try clamping each variable manually
+ assert(Clamping()==Properties::ClampType::CLAMP_VAR);
+ Real max_diff=0.0;
+ int win_k=-1, win_xk=-1;
+ for(size_t k=0; k<nrVars(); k++) {
+ for(size_t xk=0; xk<var(k).states(); xk++) {
+ if(bp.belief1(k)[xk]<tiny) continue;
+ BP_dual bp1(bp);
+ bp1.clamp(var(k), xk);
+ bp1.init(var(k));
+ bp1.run();
+ Real diff=0;
+ for(size_t j=0; j<nrVars(); j++) {
+ diff += dist(bp.belief1(j), bp1.belief1(j), Prob::DISTL1);
+ }
+ if(diff>max_diff) {
+ max_diff=diff; win_k=k; win_xk=xk;
+ }
+ }
+ }
+ assert(win_k>=0); assert(win_xk>=0);
+ i = win_k; xis.resize(1, win_xk);
+ } else if(ChooseMethod()==Properties::ChooseMethodType::CHOOSE_BBP) {
+ Real mvo; if(!maxVarOut) maxVarOut = &mvo;
+ bool clampVar = (Clamping()==Properties::ClampType::CLAMP_VAR);
+ pair<size_t, size_t> cv =
+ bbpFindClampVar(bp,
+ clampVar,
+ clamped_vars_list.size(),
+ BBP_cost_function(),getProperties(),maxVarOut);
+
+ // if slope isn't big enough then don't clamp
+ if(*maxVarOut < minMaxAdj()) return false;
+
+ size_t xi=cv.second;
+ i = cv.first;
+#define VAR_INFO (clampVar?"variable ":"factor ") \
+ << i << " state " << xi \
+ << " (p=" << (clampVar?bp.belief1(i)[xi]:bp.belief2(i)[xi]) \
+ << ", entropy = " << (clampVar?bp.belief1(i):bp.belief2(i)).entropy() \
+ << ", maxVar = "<< *maxVarOut << ")"
+ Prob b = (clampVar?bp.belief1(i).p():bp.belief2(i).p());
+ if(b[xi] < tiny) {
+ cerr << "Warning, bbpFindClampVar found unlikely "
+ << VAR_INFO << endl;
+ return false;
+ }
+ if(fabs(b[xi]-1) < tiny) {
+ cerr << "Warning, bbpFindClampVar found overly likely "
+ << VAR_INFO << endl;
+ return false;
+ }
+
+ xis.resize(1,xi);
+ if(clampVar) {
+ assert(/*0<=xi &&*/ xi<var(i).states());
+ } else {
+ assert(/*0<=xi &&*/ xi<factor(i).states());
+ }
+ DAI_IFVERB(2, "CHOOSE_BBP (num clamped = " << clamped_vars_list.size()
+ << ") chose " << i << " state " << xi << endl);
+ } else {
+ abort();
+ }
+ clamped_vars_list.push_back(i);
+ clamped_vars.insert(i);
+ return true;
+}
+
+// void CBP::clamp(const Var & n, size_t i) {
+// FactorGraph::clamp(n,i);
+// _clamped_vars.insert(findVar(n));
+// _clamped_vars_list.push_back(findVar(n));
+// }
+
+// void CBP::clamp(const Var & n, const vector<size_t> &is) {
+// FactorGraph::clamp(n,is);
+// _clamped_vars.insert(findVar(n));
+// _clamped_vars_list.push_back(findVar(n));
+// }
+
+void CBP::printDebugInfo() {
+ DAI_PV(_beliefs1);
+ DAI_PV(_beliefs2);
+ DAI_PV(_logZ);
+}
+
+//----------------------------------------------------------------
+
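+// Debugging switches: doBBPTest runs numericBBPTest to check the BBP adjoints
+// numerically; doBBPGraph writes BBP graph data (via makeBBPGraph) once, at
+// recursion depth bbpGraphLevel.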
+bool doBBPTest=false;
+bool doBBPGraph=false;
+size_t bbpGraphLevel=3;
+
+#define BPP_INIT_GIBBS 1
+
+/// Takes a BP_dual object (optionally re-initialised from a Gibbs state), creates
+/// and runs a BBP object for cost function 'cfn', and returns the (variable,state)
+/// or (factor,state) pair with the largest adjoint, to be used for clamping.
+// pair<size_t, size_t> bbpFindClampVar(const CBP &fg, bbp_cfn_t cfn, const Properties &props, Real *maxVarOut) {
+pair<size_t, size_t> bbpFindClampVar(BP_dual &in_bp_dual, bool clampVar,
+ size_t numClamped, bbp_cfn_t cfn, const PropertySet &props, Real *maxVarOut) {
+#if BPP_INIT_GIBBS
+ vector<size_t> state = getGibbsState(in_bp_dual, 100);
+ in_bp_dual.init(state);
+ in_bp_dual.run();
+#endif
+
+ Real ourTol = doBBPTest ? 1.0e-11 : 1.0e-3;
+ if(0) {
+ PropertySet bp_Props;
+ bp_Props.Set("updates", string("PARALL"));
+ // bp_Props.Set("tol", props.GetAs<double>("tol"));
+ bp_Props.Set("tol", ourTol);
+ bp_Props.Set("maxiter", props.GetAs<size_t>("maxiter"));
+ bp_Props.Set("verbose", oneLess(props.GetAs<size_t>("verbose")));
+ // bp_Props.ConvertTo<BP_dual::UpdateType>("updates");
+ // DAI_PV(bp_Props.GetAs<BP_dual::UpdateType>("updates"));
+ BP_dual bp_dual(in_bp_dual, bp_Props);
+#if BPP_INIT_GIBBS
+ bp_dual.init(state);
+#endif
+ bp_dual.run();
+ }
+
+ if(doBBPGraph && numClamped == bbpGraphLevel) {
+ cerr << "Writing BBP graph data" << endl;
+ makeBBPGraph(in_bp_dual,cfn);
+ doBBPGraph=false; // only do it once
+ cerr << "Done writing BBP graph data" << endl;
+ }
+ if(doBBPTest) {
+ double err = numericBBPTest(in_bp_dual, cfn, /*bbp tol*/ ourTol, /*h*/ 1.0e-5);
+ cerr << "Error from numericBBPTest: " << err << endl;
+ }
+ Real tic1=toc();
+ BBP bbp(in_bp_dual);
+ bbp.maxIter() = props.GetAs<size_t>("maxiter");
+#if BPP_INIT_GIBBS
+ gibbsInitBBPCostFnAdj(bbp, in_bp_dual, cfn, &state);
+#else
+ gibbsInitBBPCostFnAdj(bbp, in_bp_dual, cfn, NULL);
+#endif
+ Real tic2=toc();
+ bbp.run(ourTol);
+ if(props.GetAs<size_t>("verbose") >= 3) {
+ cerr << "BBP took " << toc()-tic1 << " seconds (BBP.run = " << toc()-tic2 << " seconds), "
+ << bbp.doneIters() << " iterations" << endl;
+ }
+
+ // find and return the (variable,state) with the largest adj_psi_1
+ size_t argmax_var=0;
+ size_t argmax_var_state=0;
+ Real max_var=0;
+ if(clampVar) {
+ for(size_t i=0; i<in_bp_dual.nrVars(); i++) {
+ Prob adj_psi_1 = bbp.adj_psi_1(i);
+ if(0) {
+ // helps to account for amount of movement possible in variable
+ // i's beliefs? seems not..
+ adj_psi_1 *= in_bp_dual.belief1(i).entropy();
+ }
+ // try to compensate for effect on same variable (doesn't work)
+ // adj_psi_1[gibbs.state()[i]] -= bp_dual.belief1(i)[gibbs.state()[i]]/10;
+ pair<size_t,Real> argmax_state = adj_psi_1.argmax();
+
+ if(i==0 || argmax_state.second>max_var) {
+ argmax_var = i;
+ max_var = argmax_state.second;
+ argmax_var_state = argmax_state.first;
+ }
+ }
+ assert(/*0 <= argmax_var_state &&*/
+ argmax_var_state < in_bp_dual.var(argmax_var).states());
+ } else {
+ for(size_t I=0; I<in_bp_dual.nrFactors(); I++) {
+ Prob adj_psi_2 = bbp.adj_psi_2(I);
+ if(0) {
+ // helps to account for amount of movement possible in factor
+ // I's beliefs? seems not..
+ adj_psi_2 *= in_bp_dual.belief2(I).entropy();
+ }
+ // try to compensate for effect on same variable (doesn't work)
+ // adj_psi_1[gibbs.state()[i]] -= bp_dual.belief1(i)[gibbs.state()[i]]/10;
+ pair<size_t,Real> argmax_state = adj_psi_2.argmax();
+
+ if(I==0 || argmax_state.second>max_var) {
+ argmax_var = I;
+ max_var = argmax_state.second;
+ argmax_var_state = argmax_state.first;
+ }
+ }
+ assert(/*0 <= argmax_var_state &&*/
+ argmax_var_state < in_bp_dual.factor(argmax_var).states());
+ }
+ if(maxVarOut) *maxVarOut = max_var;
+ return make_pair(argmax_var,argmax_var_state);
+}
+
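+// Usage sketch (illustrative only, not part of the library): constructing and
+// running CBP directly, assuming the CBP(FactorGraph,PropertySet) constructor
+// declared in cbp.h and a PropertySet 'opts' that specifies every key listed
+// in CBP::PropertyList (the CBP alias below gives a complete example):
+//
+//   CBP cbp( fg, opts );
+//   cbp.init();
+//   cbp.run();
+//   cout << cbp.logZ() << endl << cbp.belief( fg.var(0) ) << endl;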
+} // end of namespace dai
# --- BP ----------------------
+BP: BP[updates=SEQFIX,tol=1e-9,maxiter=10000,logdomain=0]
BP_SEQFIX: BP[updates=SEQFIX,tol=1e-9,maxiter=10000,logdomain=0]
BP_SEQRND: BP[updates=SEQRND,tol=1e-9,maxiter=10000,logdomain=0]
BP_SEQMAX: BP[updates=SEQMAX,tol=1e-9,maxiter=10000,logdomain=0]
GIBBS_1e7: GIBBS[iters=10000000]
GIBBS_1e8: GIBBS[iters=100000000]
GIBBS_1e9: GIBBS[iters=1000000000]
+
+# --- BP_dual ---------------------
+
+BP_dual: BP_dual[updates=SEQFIX,tol=1e-9,maxiter=10000,verbose=0]
+BP_dual_SEQFIX: BP_dual[updates=SEQFIX,tol=1e-9,maxiter=10000,verbose=0]
+
+# --- CBP ---------------------
+
+CBP: CBP[max_levels=12,updates=SEQMAX,tol=1e-9,rec_tol=1e-9,maxiter=100,choose=CHOOSE_RANDOM,recursion=REC_FIXED,clamp=CLAMP_VAR,min_max_adj=1.0e-9,bbp_cfn=cfn_b,verbose=0,rand_seed=0]
+BBP: CBP[choose=CHOOSE_BBP]