# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+# Enable/disable various approximate inference methods
+WITH_BP = true
+WITH_MF = true
+WITH_HAK = true
+WITH_LC = true
+WITH_TREEEP = true
+WITH_JTREE = true
+WITH_MR = true
+# Define DEBUG to build with debugging information and extra checks (adds -g -DDAI_DEBUG)
+DEBUG = true
+# Define NEW_MATLAB when using a recent MatLab version (enables -largeArrayDims)
+NEW_MATLAB = true
+# To enable the MatLab interface, define WITH_MATLAB = yes
+WITH_MATLAB =
+
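+# Note (illustrative, assuming GNU make): instead of editing this file, an algorithm
+# can also be left out of a single build by overriding its switch with an empty value
+# on the command line, e.g. "make WITH_MR= WITH_LC=", since ifdef treats a variable
+# with an empty value as undefined.
+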
# Directories
INC = include/dai
SRC = src
CC = g++
# Flags for the C++ compiler
-CCFLAGS = -Wall -W -Wextra -fpic -I./include -Llib -O3 -g -DDAI_DEBUG #-static #-pg #-DVERBOSE
+CCFLAGS = -Wall -W -Wextra -fpic -I./include -Llib -O3 #-pg #-static -DVERBOSE
+ifdef DEBUG
+CCFLAGS := $(CCFLAGS) -g -DDAI_DEBUG
+endif
+
+ifdef WITH_BP
+CCFLAGS := $(CCFLAGS) -DWITH_BP
+OBJECTS := $(OBJECTS) bp.o
+endif
+ifdef WITH_MF
+CCFLAGS := $(CCFLAGS) -DWITH_MF
+OBJECTS := $(OBJECTS) mf.o
+endif
+ifdef WITH_HAK
+CCFLAGS := $(CCFLAGS) -DWITH_HAK
+OBJECTS := $(OBJECTS) hak.o
+endif
+ifdef WITH_LC
+CCFLAGS := $(CCFLAGS) -DWITH_LC
+OBJECTS := $(OBJECTS) lc.o
+endif
+ifdef WITH_TREEEP
+CCFLAGS := $(CCFLAGS) -DWITH_TREEEP
+OBJECTS := $(OBJECTS) treeep.o
+endif
+ifdef WITH_JTREE
+CCFLAGS := $(CCFLAGS) -DWITH_JTREE
+OBJECTS := $(OBJECTS) jtree.o
+endif
+ifdef WITH_MR
+CCFLAGS := $(CCFLAGS) -DWITH_MR
+OBJECTS := $(OBJECTS) mr.o
+endif
-# To enable the Matlab interface, define WITH_MATLAB = yes
-WITH_MATLAB =
ifdef WITH_MATLAB
# Replace the following by the directory where Matlab has been installed
MATLABDIR = /opt/matlab/bin
-MEX = $(MATLABDIR)/mex
-MEXFLAGS = -g -I. -DDAI_DEBUG -largeArrayDims #-g means debugging
-endif
-
# Replace the following with the extension of compiled MEX files on this platform, e.g. .mexglx for x86
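+# (or .mexa64 for 64-bit Linux)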
MEXEXT = .mexglx
+MEX = $(MATLABDIR)/mex
+MEXFLAGS = -I.
+ifdef DEBUG
+MEXFLAGS := $(MEXFLAGS) -g -DDAI_DEBUG
+endif
+ifdef NEW_MATLAB
+MEXFLAGS := $(MEXFLAGS) -largeArrayDims
+else
+MEXFLAGS := $(MEXFLAGS) -DSMALLMEM
+endif
+endif
HEADERS = $(INC)/bipgraph.h $(INC)/diffs.h $(INC)/index.h $(INC)/var.h $(INC)/factor.h $(INC)/varset.h $(INC)/prob.h $(INC)/daialg.h $(INC)/properties.h $(INC)/alldai.h $(INC)/enum.h $(INC)/x2x.h
-# target matlabs is disabled by default since it only compiles with a very recent MatLab version
-TARGETS = tests utils $(LIB)/libdai.a example testregression
+TARGETS = tests utils $(LIB)/libdai.a example testregression doc
ifdef WITH_MATLAB
TARGETS := $(TARGETS) matlabs
endif
matlabs : matlab/dai.$(MEXEXT) matlab/dai_readfg.$(MEXEXT) matlab/dai_writefg.$(MEXEXT) matlab/dai_removeshortloops.$(MEXEXT) matlab/dai_potstrength.$(MEXEXT)
-$(LIB)/libdai.a : daialg.o alldai.o bp.o clustergraph.o factorgraph.o hak.o jtree.o lc.o mf.o mr.o properties.o regiongraph.o util.o treeep.o weightedgraph.o x2x.o
- ar rcs $(LIB)/libdai.a daialg.o alldai.o bp.o clustergraph.o factorgraph.o hak.o jtree.o lc.o mf.o mr.o properties.o regiongraph.o util.o treeep.o weightedgraph.o x2x.o
+$(LIB)/libdai.a : daialg.o alldai.o clustergraph.o factorgraph.o properties.o regiongraph.o util.o weightedgraph.o x2x.o $(OBJECTS)
+ ar rcs $(LIB)/libdai.a daialg.o alldai.o clustergraph.o factorgraph.o properties.o regiongraph.o util.o weightedgraph.o x2x.o $(OBJECTS)
tests : tests/test
testregression : tests/test
echo Testing...this can take a while...
- cd tests; ./testregression; cd ..
+ cd tests; time ./testregression; cd ..
doc : $(INC)/*.h $(SRC)/*.cpp doxygen.conf
doxygen doxygen.conf
clean :
- rm *.o *.$(MEXEXT) example matlab/*.$(MEXEXT) matlab/*.o tests/test utils/fg2dot utils/createfg utils/remove_short_loops utils/fginfo $(LIB)/libdai.a; echo
+ rm *.o example matlab/*.$(MEXEXT) matlab/*.o tests/test utils/fg2dot utils/createfg utils/remove_short_loops utils/fginfo $(LIB)/libdai.a; echo
rm -R doc; echo
# TESTS
########
-tests/test : tests/test.cpp $(HEADERS) lib/libdai.a
+tests/test : tests/test.cpp $(HEADERS) $(LIB)/libdai.a
$(CC) $(CCFLAGS) -o tests/test tests/test.cpp -ldai $(BOOSTFLAGS)
# MATLAB INTERFACE
###################
-matlab/dai.$(MEXEXT) : matlab/dai.cpp $(HEADERS) matlab/matlab.o daialg.o alldai.o bp.o clustergraph.o factorgraph.o hak.o jtree.o lc.o mf.o mr.o properties.o regiongraph.o util.o treeep.o weightedgraph.o x2x.o
- $(MEX) $(MEXFLAGS) -o matlab/dai matlab/dai.cpp matlab/matlab.o daialg.o alldai.o bp.o clustergraph.o factorgraph.o hak.o jtree.o lc.o mf.o mr.o properties.o regiongraph.o util.o treeep.o weightedgraph.o x2x.o
+matlab/dai.$(MEXEXT) : matlab/dai.cpp $(HEADERS) matlab/matlab.o $(LIB)/libdai.a
+ $(MEX) $(MEXFLAGS) -o matlab/dai matlab/dai.cpp matlab/matlab.o $(LIB)/libdai.a
matlab/dai_readfg.$(MEXEXT) : matlab/dai_readfg.cpp $(HEADERS) factorgraph.o matlab/matlab.o
$(MEX) $(MEXFLAGS) -o matlab/dai_readfg matlab/dai_readfg.cpp factorgraph.o matlab/matlab.o
#include <string>
#include <dai/daialg.h>
-#include <dai/bp.h>
-#include <dai/lc.h>
-#include <dai/hak.h>
-#include <dai/mf.h>
-#include <dai/jtree.h>
-#include <dai/treeep.h>
-#include <dai/mr.h>
+#ifdef WITH_BP
+ #include <dai/bp.h>
+#endif
+#ifdef WITH_MF
+ #include <dai/mf.h>
+#endif
+#ifdef WITH_HAK
+ #include <dai/hak.h>
+#endif
+#ifdef WITH_LC
+ #include <dai/lc.h>
+#endif
+#ifdef WITH_TREEEP
+ #include <dai/treeep.h>
+#endif
+#ifdef WITH_JTREE
+ #include <dai/jtree.h>
+#endif
+#ifdef WITH_MR
+ #include <dai/mr.h>
+#endif
namespace dai {
InfAlg *newInfAlg( const std::string &name, const FactorGraph &fg, const Properties &opts );
-/// AINames contains the names of all approximate inference algorithms
-static const char* DAINames[] = {BP::Name, MF::Name, HAK::Name, LC::Name, TreeEP::Name, MR::Name, JTree::Name};
+/// DAINames contains the names of all approximate inference algorithms
+static const char* DAINames[] = {
+#ifdef WITH_BP
+ BP::Name,
+#endif
+#ifdef WITH_MF
+ MF::Name,
+#endif
+#ifdef WITH_HAK
+ HAK::Name,
+#endif
+#ifdef WITH_LC
+ LC::Name,
+#endif
+#ifdef WITH_TREEEP
+ TreeEP::Name,
+#endif
+#ifdef WITH_JTREE
+ JTree::Name,
+#endif
+#ifdef WITH_MR
+ MR::Name,
+#endif
+ ""
+};
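+
+/* Illustrative usage sketch (not part of this header): constructing an inference
+   algorithm by name with newInfAlg(). It assumes that a FactorGraph fg and a
+   Properties object opts have been set up elsewhere, and that the requested
+   algorithm has been compiled in (see the WITH_* switches in the Makefile):
+
+       dai::InfAlg *ia = dai::newInfAlg( dai::BP::Name, fg, opts );
+       ia->init();
+       ia->run();
+       // ... query ia->beliefs(), then clean up:
+       delete ia;
+*/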
} // end of namespace dai
#include <vector>
#include <algorithm>
-#include <boost/numeric/ublas/vector_sparse.hpp>
+#include <dai/util.h>
namespace dai {
-/// A BipartiteGraph represents a graph with two types of nodes
-template <class T1, class T2> class BipartiteGraph {
+/// A BipartiteGraph represents a bipartite graph, with two types of nodes (both are numbered
+/// as 0,1,2,...), with edges only between nodes of different type. The edges are stored as
+/// lists of adjacent nodes for each node.
+class BipartiteGraph {
public:
- typedef std::pair<size_t,size_t> _edge_t;
- typedef std::vector<size_t> _nb_t;
- typedef _nb_t::const_iterator _nb_cit;
-
+ /// A Neighbor describes a neighboring node of some other node.
+ /** Iterating over all neighbors of some node i can be done in the following way:
+ * \code
+ * foreach( const BipartiteGraph::Neighbor &I, nb1(i) ) {
+ * size_t _I = I.iter;
+ * size_t _i = I.dual;
+ * // I == I.node;
+ * // The _I'th neighbor of i is I, and the _i'th neighbor of I is i:
+ * // nb1(i)[_I] == I, nb2(I)[_i] == i
+ * }
+ * \endcode
+ */
+ struct Neighbor {
+ /// iter corresponds to the index of this Neighbor entry in the list of neighbors
+ unsigned iter;
+ /// node contains the number of the neighboring node
+ unsigned node;
+ /// dual contains the "dual" iter
+ unsigned dual;
+ /// cast to unsigned returns node
+ operator unsigned () const { return node; };
+ };
+
+    /// Neighbors is a vector of Neighbor entries
+ typedef std::vector<Neighbor> Neighbors;
private:
- /// Vertices of first type
- std::vector<T1> _V1;
-
- /// Vertices of second type
- std::vector<T2> _V2;
-
- /// Edges, which are pairs (v1,v2) with v1 in _V1 and v2 in _V2
- std::vector<_edge_t> _E12;
-
- /// Conversion matrix that computes which index of _E12 corresponds to a vertex index pair (v1,v2)
- std::vector<boost::numeric::ublas::mapped_vector<size_t> > _E12ind;
-
- /// Neighbour indices of vertices of first type
- std::vector<_nb_t> _nb1;
-
- /// Neighbour indices of vertices of second type
- std::vector<_nb_t> _nb2;
-
+ /// _nb1 contains for each node of the first kind a list of its neighbors
+ std::vector<Neighbors> _nb1;
+ /// _nb2 contains for each node of the second kind a list of its neighbors
+ std::vector<Neighbors> _nb2;
public:
/// Default constructor
- BipartiteGraph<T1,T2> () {};
+ BipartiteGraph() : _nb1(), _nb2() {}
/// Copy constructor
- BipartiteGraph<T1,T2> ( const BipartiteGraph<T1,T2> & x ) : _V1(x._V1), _V2(x._V2), _E12(x._E12), _E12ind(x._E12ind), _nb1(x._nb1), _nb2(x._nb2) {};
+ BipartiteGraph( const BipartiteGraph & x ) : _nb1(x._nb1), _nb2(x._nb2) {}
/// Assignment operator
- BipartiteGraph<T1,T2> & operator=(const BipartiteGraph<T1,T2> & x) {
+ BipartiteGraph & operator=( const BipartiteGraph & x ) {
if( this != &x ) {
- _V1 = x._V1;
- _V2 = x._V2;
- _E12 = x._E12;
- _E12ind = x._E12ind;
- _nb1 = x._nb1;
- _nb2 = x._nb2;
+ _nb1 = x._nb1;
+ _nb2 = x._nb2;
}
return *this;
}
+
+ /// Create bipartite graph from a range of edges, encoded as pairs of node numbers
+ /// (more precisely, a std::pair<unsigned, unsigned> where the first integer corresponds
+ /// to the node of the first type, and the second integer corresponds to the node of the
+ /// second type). nr1 is the number of nodes of the first type, nr2 the number of nodes
+ /// of the second type.
+ template<typename EdgeInputIterator>
+ void create( size_t nr1, size_t nr2, EdgeInputIterator begin, EdgeInputIterator end );
+
+ /// Construct bipartite graph from a range of edges, encoded as pairs of node numbers
+ /// (more precisely, a std::pair<unsigned, unsigned> where the first integer corresponds
+ /// to the node of the first type, and the second integer corresponds to the node of the
+ /// second type). nr1 is the number of nodes of the first type, nr2 the number of nodes
+ /// of the second type.
+ template<typename EdgeInputIterator>
+ BipartiteGraph( size_t nr1, size_t nr2, EdgeInputIterator begin, EdgeInputIterator end ) : _nb1( nr1 ), _nb2( nr2 ) {
+ create( nr1, nr2, begin, end );
+ }
+
+ /// Returns constant reference to the _i2'th neighbor of node i1 of first type
+ const Neighbor & nb1( size_t i1, size_t _i2 ) const { return _nb1[i1][_i2]; }
+ /// Returns reference to the _i2'th neighbor of node i1 of first type
+ Neighbor & nb1( size_t i1, size_t _i2 ) { return _nb1[i1][_i2]; }
+
+ /// Returns constant reference to the _i1'th neighbor of node i2 of second type
+ const Neighbor & nb2( size_t i2, size_t _i1 ) const { return _nb2[i2][_i1]; }
+ /// Returns reference to the _i1'th neighbor of node i2 of second type
+ Neighbor & nb2( size_t i2, size_t _i1 ) { return _nb2[i2][_i1]; }
+
+ /// Returns constant reference to all neighbors of node of first type
+ const Neighbors & nb1( size_t i1 ) const { return _nb1[i1]; }
+ /// Returns reference to all neighbors of node of first type
+ Neighbors & nb1( size_t i1 ) { return _nb1[i1]; }
+
+ /// Returns constant reference to all neighbors of node of second type
+ const Neighbors & nb2( size_t i2 ) const { return _nb2[i2]; }
+ /// Returns reference to all neighbors of node of second type
+ Neighbors & nb2( size_t i2 ) { return _nb2[i2]; }
+
+ /// Returns number of nodes of first type
+ size_t nr1() const { return _nb1.size(); }
+ /// Returns number of nodes of second type
+ size_t nr2() const { return _nb2.size(); }
- /// Provides read access to node of first type
- const T1 & V1( size_t i1 ) const { return _V1[i1]; }
- /// Provides full access to node of first type
- T1 & V1( size_t i1 ) { return _V1[i1]; }
- /// Provides read access to all nodes of first type
- const std::vector<T1> & V1s() const { return _V1; }
- /// Provides full access to all nodes of first type
- std::vector<T1> & V1s() { return _V1; }
-
- /// Provides read access to node of second type
- const T2 & V2( size_t i2 ) const { return _V2[i2]; }
- /// Provides full access to node of second type
- T2 & V2( size_t i2 ) { return _V2[i2]; }
- /// Provides read access to all nodes of second type
- const std::vector<T2> & V2s() const { return _V2; }
- /// Provides full access to all nodes of second type
- std::vector<T2> & V2s() { return _V2; }
-
- /// Provides read access to edge
- const _edge_t & edge(size_t ind) const { return _E12[ind]; }
- /// Provides full access to edge
- _edge_t & edge(size_t ind) { return _E12[ind]; }
- /// Provides read access to all edges
- const std::vector<_edge_t> & edges() const { return _E12; }
- /// Provides full access to all edges
- std::vector<_edge_t> & edges() { return _E12; }
- /// Returns number of edges
- size_t nr_edges() const { return _E12.size(); }
-
- /// Provides read access to neighbours of node of first type
- const _nb_t & nb1( size_t i1 ) const { return _nb1[i1]; }
- /// Provides full access to neighbours of node of first type
- _nb_t & nb1( size_t i1 ) { return _nb1[i1]; }
-
- /// Provides read access to neighbours of node of second type
- const _nb_t & nb2( size_t i2 ) const { return _nb2[i2]; }
- /// Provides full access to neighbours of node of second type
- _nb_t & nb2( size_t i2 ) { return _nb2[i2]; }
-
- /// Converts the pair of indices (i1,i2) to the corresponding edge index
- size_t VV2E( const size_t i1, const size_t i2 ) const { return _E12ind[i1](i2); }
-
-
- /// Regenerates internal structures (_E12ind, _nb1 and _nb2) based upon _V1, _V2 and _E12
- void Regenerate() {
- // Calculate _nb1 and _nb2
-
- // Start with empty vectors
- _nb1.clear();
- _nb1.resize(_V1.size());
- // Start with empty vectors
- _nb2.clear();
- _nb2.resize(_V2.size());
- // Each edge yields a neighbour pair
- for( std::vector<_edge_t>::const_iterator e = _E12.begin(); e != _E12.end(); e++ ) {
- _nb1[e->first].push_back(e->second);
- _nb2[e->second].push_back(e->first);
- }
- // Remove duplicates from _nb1
- for( size_t i1 = 0; i1 < _V1.size(); i1++ ) {
- _nb_t::iterator new_end = unique(_nb1[i1].begin(), _nb1[i1].end());
- _nb1[i1].erase( new_end, _nb1[i1].end() );
- }
- // Remove duplicates from _nb2
- for( size_t i2 = 0; i2 < _V2.size(); i2++ ) {
- _nb_t::iterator new_end = unique(_nb2[i2].begin(), _nb2[i2].end());
- _nb2[i2].erase( new_end, _nb2[i2].end() );
- }
+ /// Calculates the number of edges
+ size_t nrEdges() const {
+ size_t sum = 0;
+ for( size_t i1 = 0; i1 < nr1(); i1++ )
+ sum += nb1(i1).size();
+ return sum;
+ }
- // Calculate _E12ind
- _E12ind = std::vector<boost::numeric::ublas::mapped_vector<size_t> >(_V1.size(), boost::numeric::ublas::mapped_vector<size_t>(_V2.size()) );
- for( size_t k = 0; k < _E12.size(); k++ )
- _E12ind[_E12[k].first](_E12[k].second) = k;
-
+ /// Returns true if the graph is connected
+ /// FIXME: this should be optimized
+ bool isConnected() const {
+ if( nr1() == 0 ) {
+ return true;
+ } else {
+ std::vector<bool> incomponent1( nr1(), false );
+ std::vector<bool> incomponent2( nr2(), false );
+
+ incomponent1[0] = true;
+ bool found_new_nodes;
+ do {
+ found_new_nodes = false;
+
+ // For all nodes of second type, check if they are connected with the (growing) component
+ for( size_t n2 = 0; n2 < nr2(); n2++ )
+ if( !incomponent2[n2] ) {
+ foreach( const Neighbor &n1, nb2(n2) ) {
+ if( incomponent1[n1] ) {
+ found_new_nodes = true;
+ incomponent2[n2] = true;
+ break;
+ }
+ }
+ }
+
+ // For all nodes of first type, check if they are connected with the (growing) component
+ for( size_t n1 = 0; n1 < nr1(); n1++ )
+ if( !incomponent1[n1] ) {
+ foreach( const Neighbor &n2, nb1(n1) ) {
+ if( incomponent2[n2] ) {
+ found_new_nodes = true;
+ incomponent1[n1] = true;
+ break;
+ }
+ }
+ }
+ } while( found_new_nodes );
+
+ // Check if there are remaining nodes (not in the component)
+ bool all_connected = true;
+ for( size_t n1 = 0; (n1 < nr1()) && all_connected; n1++ )
+ if( !incomponent1[n1] )
+ all_connected = false;
+ for( size_t n2 = 0; (n2 < nr2()) && all_connected; n2++ )
+ if( !incomponent2[n2] )
+ all_connected = false;
+
+ return all_connected;
+ }
}
+
};
+template<typename EdgeInputIterator>
+void BipartiteGraph::create( size_t nr1, size_t nr2, EdgeInputIterator begin, EdgeInputIterator end ) {
+ _nb1.clear();
+ _nb1.resize( nr1 );
+ _nb2.clear();
+ _nb2.resize( nr2 );
+
+ for( EdgeInputIterator e = begin; e != end; e++ ) {
+ // Each edge yields a neighbor pair
+ Neighbor nb_1;
+ nb_1.iter = _nb1[e->first].size();
+ nb_1.node = e->second;
+ nb_1.dual = _nb2[e->second].size();
+
+ Neighbor nb_2;
+ nb_2.iter = nb_1.dual;
+ nb_2.node = e->first;
+ nb_2.dual = nb_1.iter;
+
+ _nb1[e->first].push_back( nb_1 );
+ _nb2[e->second].push_back( nb_2 );
+ }
+}
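+
+/* Illustrative usage sketch (not part of this header): build the bipartite graph of
+   a tiny factor graph with two variables and one factor containing both, i.e. with
+   the edges (variable 0, factor 0) and (variable 1, factor 0):
+
+       std::vector<std::pair<unsigned,unsigned> > edges;
+       edges.push_back( std::make_pair( 0U, 0U ) );
+       edges.push_back( std::make_pair( 1U, 0U ) );
+       dai::BipartiteGraph G( 2, 1, edges.begin(), edges.end() );
+       assert( G.nrEdges() == 2 );
+       assert( G.isConnected() );
+       assert( G.nb1(0)[0].node == 0 );   // the 0'th neighbor of variable 0 is factor 0
+       assert( G.nb2(0)[1].node == 1 );   // the 1'st neighbor of factor 0 is variable 1
+*/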
+
+
} // end of namespace dai
class BP : public DAIAlgFG {
protected:
- typedef std::vector<size_t> _ind_t;
+ typedef std::vector<size_t> ind_t;
+        struct EdgeProp {
+            ind_t  index;        // precalculated index table (see index(i,_I) and calcNewMessage)
+            Prob   message;      // current message from factor to variable
+            Prob   newMessage;   // updated message, pending until it replaces message
+            double residual;     // distance between newMessage and message (used for SEQMAX updates)
+        };
- std::vector<_ind_t> _indices;
- std::vector<Prob> _messages, _newmessages;
+ std::vector<std::vector<EdgeProp> > edges;
public:
ENUM4(UpdateType,SEQFIX,SEQRND,SEQMAX,PARALL)
// default constructor
BP() : DAIAlgFG() {};
// copy constructor
- BP(const BP & x) : DAIAlgFG(x), _indices(x._indices), _messages(x._messages), _newmessages(x._newmessages) {};
+ BP(const BP & x) : DAIAlgFG(x), edges(x.edges) {};
BP* clone() const { return new BP(*this); }
// construct BP object from FactorGraph
BP(const FactorGraph & fg, const Properties &opts) : DAIAlgFG(fg, opts) {
BP & operator=(const BP & x) {
if(this!=&x) {
DAIAlgFG::operator=(x);
- _messages = x._messages;
- _newmessages = x._newmessages;
- _indices = x._indices;
+ edges = x.edges;
}
return *this;
}
static const char *Name;
- Prob & message(size_t i1, size_t i2) { return( _messages[VV2E(i1,i2)] ); }
- const Prob & message(size_t i1, size_t i2) const { return( _messages[VV2E(i1,i2)] ); }
- Prob & newMessage(size_t i1, size_t i2) { return( _newmessages[VV2E(i1,i2)] ); }
- const Prob & newMessage(size_t i1, size_t i2) const { return( _newmessages[VV2E(i1,i2)] ); }
- _ind_t & index(size_t i1, size_t i2) { return( _indices[VV2E(i1,i2)] ); }
- const _ind_t & index(size_t i1, size_t i2) const { return( _indices[VV2E(i1,i2)] ); }
+ Prob & message(size_t i, size_t _I) { return edges[i][_I].message; }
+ const Prob & message(size_t i, size_t _I) const { return edges[i][_I].message; }
+ Prob & newMessage(size_t i, size_t _I) { return edges[i][_I].newMessage; }
+ const Prob & newMessage(size_t i, size_t _I) const { return edges[i][_I].newMessage; }
+ ind_t & index(size_t i, size_t _I) { return edges[i][_I].index; }
+ const ind_t & index(size_t i, size_t _I) const { return edges[i][_I].index; }
+ double & residual(size_t i, size_t _I) { return edges[i][_I].residual; }
+ const double & residual(size_t i, size_t _I) const { return edges[i][_I].residual; }
+ void findMaxResidual( size_t &i, size_t &_I );
std::string identify() const;
void Regenerate();
void init();
- void calcNewMessage(size_t iI);
+ void calcNewMessage( size_t i, size_t _I );
double run();
- Factor belief1 (size_t i) const;
- Factor belief2 (size_t I) const;
+ Factor beliefV (size_t i) const;
+ Factor beliefF (size_t I) const;
Factor belief (const Var &n) const;
Factor belief (const VarSet &n) const;
std::vector<Factor> beliefs() const;
/// Clamp variable n to value i (i.e. multiply with a Kronecker delta \f$\delta_{x_n, i}\f$)
virtual void clamp( const Var & n, size_t i ) = 0;
- /// Return all variables that interact with n
- virtual VarSet delta( const Var & n ) const = 0;
+ /// Return all variables that interact with var(i)
+ virtual VarSet delta( size_t i ) const = 0;
- /// Set all factors interacting with n to 1
- virtual void makeCavity( const Var & n ) = 0;
-
- /// Set factor I to 1
- virtual void makeFactorCavity( size_t I ) = 0;
+ /// Set all factors interacting with var(i) to 1
+ virtual void makeCavity( size_t i ) = 0;
/// Get index of variable n
virtual size_t findVar( const Var & n ) const = 0;
/// Clamp variable n to value i (i.e. multiply with a Kronecker delta \f$\delta_{x_n, i}\f$) (using T::clamp)
void clamp( const Var & n, size_t i ) { T::clamp( n, i ); }
- /// Return all variables that interact with n (using T::delta)
- VarSet delta( const Var & n ) const { return T::delta(n); }
-
- /// Set all factors interacting with n to 1 (using T::makeCavity)
- void makeCavity( const Var & n ) { T::makeCavity(n); }
+ /// Return all variables that interact with var(i) (using T::delta)
+ VarSet delta( size_t i ) const { return T::delta( i ); }
- /// Set factor I to 1 (using T::makeFactorCavity)
- void makeFactorCavity( size_t I ) { T::makeFactorCavity(I); }
+ /// Set all factors interacting with var(i) to 1 (using T::makeCavity)
+ void makeCavity( size_t i ) { T::makeCavity( i ); }
/// Get index of variable n (using T::findVar)
size_t findVar( const Var & n ) const { return T::findVar(n); }
void RemoveShortLoops( std::vector<Factor> &P );
-class FactorGraph : public BipartiteGraph<Var,Factor> {
+class FactorGraph {
+ public:
+ BipartiteGraph G;
+ std::vector<Var> vars;
+ std::vector<Factor> factors;
+ typedef BipartiteGraph::Neighbor Neighbor;
+ typedef BipartiteGraph::Neighbors Neighbors;
+
protected:
- std::map<size_t,Prob> _undoProbs;
- Prob::NormType _normtype;
+ std::map<size_t,Prob> _undoProbs;
+ Prob::NormType _normtype;
public:
/// Default constructor
- FactorGraph() : BipartiteGraph<Var,Factor>(), _undoProbs(), _normtype(Prob::NORMPROB) {};
+ FactorGraph() : G(), vars(), factors(), _undoProbs(), _normtype(Prob::NORMPROB) {};
/// Copy constructor
- FactorGraph(const FactorGraph & x) : BipartiteGraph<Var,Factor>(x), _undoProbs(), _normtype(x._normtype) {};
+ FactorGraph(const FactorGraph & x) : G(x.G), vars(x.vars), factors(x.factors), _undoProbs(x._undoProbs), _normtype(x._normtype) {};
/// Construct FactorGraph from vector of Factors
FactorGraph(const std::vector<Factor> &P);
// Construct a FactorGraph from given factor and variable iterators
/// Assignment operator
FactorGraph & operator=(const FactorGraph & x) {
- if(this!=&x) {
- BipartiteGraph<Var,Factor>::operator=(x);
- _undoProbs = x._undoProbs;
- _normtype = x._normtype;
+ if( this != &x ) {
+ G = x.G;
+ vars = x.vars;
+ factors = x.factors;
+ _undoProbs = x._undoProbs;
+ _normtype = x._normtype;
}
return *this;
}
virtual ~FactorGraph() {}
// aliases
- Var & var(size_t i) { return V1(i); }
- const Var & var(size_t i) const { return V1(i); }
- const std::vector<Var> & vars() const { return V1s(); }
- std::vector<Var> & vars() { return V1s(); }
- size_t nrVars() const { return V1s().size(); }
- Factor & factor(size_t I) { return V2(I); }
- const Factor & factor(size_t I) const { return V2(I); }
- const std::vector<Factor> & factors() const { return V2s(); }
- std::vector<Factor> & factors() { return V2s(); }
- size_t nrFactors() const { return V2s().size(); }
-
- /// Provides read access to neighbours of variable
- const _nb_t & nbV( size_t i1 ) const { return nb1(i1); }
- /// Provides full access to neighbours of variable
- _nb_t & nbV( size_t i1 ) { return nb1(i1); }
- /// Provides read access to neighbours of factor
- const _nb_t & nbF( size_t i2 ) const { return nb2(i2); }
- /// Provides full access to neighbours of factor
- _nb_t & nbF( size_t i2 ) { return nb2(i2); }
+ Var & var(size_t i) { return vars[i]; }
+ const Var & var(size_t i) const { return vars[i]; }
+ Factor & factor(size_t I) { return factors[I]; }
+ const Factor & factor(size_t I) const { return factors[I]; }
+
+ size_t nrVars() const { return vars.size(); }
+ size_t nrFactors() const { return factors.size(); }
+ size_t nrEdges() const { return G.nrEdges(); }
+
+ /// Provides read access to neighbors of variable
+ const Neighbors & nbV( size_t i ) const { return G.nb1(i); }
+ /// Provides full access to neighbors of variable
+ Neighbors & nbV( size_t i ) { return G.nb1(i); }
+ /// Provides read access to neighbors of factor
+ const Neighbors & nbF( size_t I ) const { return G.nb2(I); }
+ /// Provides full access to neighbors of factor
+ Neighbors & nbF( size_t I ) { return G.nb2(I); }
+ /// Provides read access to neighbor of variable
+ const Neighbor & nbV( size_t i, size_t _I ) const { return G.nb1(i)[_I]; }
+ /// Provides full access to neighbor of variable
+ Neighbor & nbV( size_t i, size_t _I ) { return G.nb1(i)[_I]; }
+ /// Provides read access to neighbor of factor
+ const Neighbor & nbF( size_t I, size_t _i ) const { return G.nb2(I)[_i]; }
+ /// Provides full access to neighbor of factor
+ Neighbor & nbF( size_t I, size_t _i ) { return G.nb2(I)[_i]; }
size_t findVar(const Var & n) const {
- size_t i = find( vars().begin(), vars().end(), n ) - vars().begin();
+ size_t i = find( vars.begin(), vars.end(), n ) - vars.begin();
assert( i != nrVars() );
return i;
}
friend std::ostream& operator << (std::ostream& os, const FactorGraph& fg);
friend std::istream& operator >> (std::istream& is, FactorGraph& fg);
- VarSet delta(const Var & n) const;
- VarSet Delta(const Var & n) const;
- virtual void makeFactorCavity(size_t I);
- virtual void makeCavity(const Var & n);
+ VarSet delta( unsigned i ) const;
+ VarSet Delta( unsigned i ) const;
+ virtual void makeCavity( unsigned i );
long ReadFromFile(const char *filename);
long WriteToFile(const char *filename) const;
long WriteToDotFile(const char *filename) const;
- Factor ExactMarginal(const VarSet & x) const;
- Real ExactlogZ() const;
-
virtual void clamp( const Var & n, size_t i );
bool hasNegatives() const;
virtual void undoProb( size_t I );
void saveProb( size_t I );
- bool isConnected() const;
-
virtual void updatedFactor( size_t /*I*/ ) {};
private:
- /// Part of constructors (creates edges, neighbours and adjacency matrix)
+    /// Part of constructors (creates edges and neighbor structure)
void createGraph( size_t nrEdges );
};
// assumes that the set of variables in [var_begin,var_end) is the union of the variables in the factors in [fact_begin, fact_end)
template<typename FactorInputIterator, typename VarInputIterator>
-FactorGraph::FactorGraph(FactorInputIterator fact_begin, FactorInputIterator fact_end, VarInputIterator var_begin, VarInputIterator var_end, size_t nr_fact_hint, size_t nr_var_hint ) : BipartiteGraph<Var,Factor>(), _undoProbs(), _normtype(Prob::NORMPROB) {
+FactorGraph::FactorGraph(FactorInputIterator fact_begin, FactorInputIterator fact_end, VarInputIterator var_begin, VarInputIterator var_end, size_t nr_fact_hint, size_t nr_var_hint ) : G(), _undoProbs(), _normtype(Prob::NORMPROB) {
// add factors
size_t nrEdges = 0;
- V2s().reserve( nr_fact_hint );
+ factors.reserve( nr_fact_hint );
for( FactorInputIterator p2 = fact_begin; p2 != fact_end; ++p2 ) {
- V2s().push_back( *p2 );
- nrEdges += p2->vars().size();
+ factors.push_back( *p2 );
+ nrEdges += p2->vars().size();
}
// add variables
- V1s().reserve( nr_var_hint );
+ vars.reserve( nr_var_hint );
for( VarInputIterator p1 = var_begin; p1 != var_end; ++p1 )
- V1s().push_back( *p1 );
+ vars.push_back( *p1 );
// create graph structure
createGraph( nrEdges );
/// HAK provides an implementation of the single and double-loop algorithms by Heskes, Albers and Kappen
class HAK : public DAIAlgRG {
protected:
- std::vector<Factor> _Qa;
- std::vector<Factor> _Qb;
- std::vector<Factor> _muab;
- std::vector<Factor> _muba;
+ std::vector<Factor> _Qa;
+ std::vector<Factor> _Qb;
+ std::vector<std::vector<Factor> > _muab;
+ std::vector<std::vector<Factor> > _muba;
public:
/// Default constructor
bool DoubleLoop() { return GetPropertyAs<bool>("doubleloop"); }
size_t LoopDepth() { return GetPropertyAs<size_t>("loopdepth"); }
- Factor & muab( size_t alpha, size_t beta ) { return _muab[ORIR2E(alpha,beta)]; }
- Factor & muba( size_t beta, size_t alpha ) { return _muba[ORIR2E(alpha,beta)]; }
+ Factor & muab( size_t alpha, size_t _beta ) { return _muab[alpha][_beta]; }
+ Factor & muba( size_t alpha, size_t _beta ) { return _muba[alpha][_beta]; }
const Factor& Qa( size_t alpha ) const { return _Qa[alpha]; };
const Factor& Qb( size_t beta ) const { return _Qb[beta]; };
-// void Regenerate();
double doGBP();
double doDoubleLoop();
double run();
DEdgeVec _RTree; // rooted tree
std::vector<Factor> _Qa;
std::vector<Factor> _Qb;
- std::vector<Factor> _mes;
+ std::vector<std::vector<Factor> > _mes;
double _logZ;
JTree( const FactorGraph &fg, const Properties &opts, bool automatic=true );
void GenerateJT( const std::vector<VarSet> &Cliques );
- Factor & message(size_t i1, size_t i2) { return( _mes[ORIR2E(i1,i2)] ); }
- const Factor & message(size_t i1, size_t i2) const { return( _mes[ORIR2E(i1,i2)] ); }
+ Factor & message( size_t alpha, size_t _beta ) { return _mes[alpha][_beta]; }
+ const Factor & message( size_t alpha, size_t _beta ) const { return _mes[alpha][_beta]; }
static const char *Name;
std::string identify() const;
-// void Regenerate();
- void init() {
- assert( checkProperties() );
- }
+ void init() { assert( checkProperties() ); }
void runHUGIN();
void runShaferShenoy();
double run();
class LC : public DAIAlgFG {
protected:
- typedef struct { size_t i; size_t I; } _iI_type;
-
std::vector<Factor> _pancakes; // used by all LC types (psi_I is stored in the pancake)
std::vector<Factor> _cavitydists; // used by all LC types to store the approximate cavity distribution
- /// _phis[VV2E(i,I)] corresponds to \f$ \phi^{\setminus i}_I(x_{I \setminus i}) \f$
- std::vector<Factor> _phis;
+ /// _phis[i][_I] corresponds to \f$ \phi^{\setminus i}_I(x_{I \setminus i}) \f$
+ std::vector<std::vector<Factor> > _phis;
/// Single variable beliefs
std::vector<Factor> _beliefs;
- /// For each pair (i,j) with j in delta(i), store i and the common factor I
- std::vector<_iI_type> _iI;
-
public:
ENUM6(CavityType,FULL,PAIR,PAIR2,PAIRINT,PAIRCUM,UNIFORM)
ENUM3(UpdateType,SEQFIX,SEQRND,NONE)
/// Default constructor
LC() : DAIAlgFG() {};
/// Copy constructor
- LC(const LC & x) : DAIAlgFG(x), _pancakes(x._pancakes), _cavitydists(x._cavitydists), _phis(x._phis), _beliefs(x._beliefs), _iI(x._iI) {};
+ LC(const LC & x) : DAIAlgFG(x), _pancakes(x._pancakes), _cavitydists(x._cavitydists), _phis(x._phis), _beliefs(x._beliefs) {};
/// Clone function
LC* clone() const { return new LC(*this); }
/// Construct LC object from a FactorGraph and parameters
_cavitydists = x._cavitydists;
_phis = x._phis;
_beliefs = x._beliefs;
- _iI = x._iI;
}
return *this;
}
long SetCavityDists( std::vector<Factor> &Q );
void init();
- Factor NewPancake (size_t iI, bool & hasNaNs);
+ Factor NewPancake (size_t i, size_t _I, bool & hasNaNs);
double run();
std::string identify() const;
const Factor & belief (size_t i) const { return _beliefs[i]; };
const Factor & pancake (size_t i) const { return _pancakes[i]; };
const Factor & cavitydist (size_t i) const { return _cavitydists[i]; };
- size_t nr_iI() const { return _iI.size(); };
void clamp( const Var &/*n*/, size_t /*i*/ ) { assert( 0 == 1 ); }
void undoProbs( const VarSet &/*ns*/ ) { assert( 0 == 1 ); }
void saveProbs( const VarSet &/*ns*/ ) { assert( 0 == 1 ); }
- void makeFactorCavity(size_t /*I*/) { assert( 0 == 1 ); }
virtual void makeCavity(const Var & /*n*/) { assert( 0 == 1 ); }
bool checkProperties();
};
void Regenerate();
void init();
double run();
- Factor belief1 (size_t i) const;
+ Factor beliefV (size_t i) const;
Factor belief (const Var &n) const;
Factor belief (const VarSet &ns) const;
std::vector<Factor> beliefs() const;
};
-typedef BipartiteGraph<FRegion,Region> BipRegGraph;
-
-
/// A RegionGraph is a bipartite graph consisting of outer regions (type FRegion) and inner regions (type Region)
-class RegionGraph : public FactorGraph, BipRegGraph {
+class RegionGraph : public FactorGraph {
public:
- typedef BipRegGraph::_nb_t R_nb_t;
- typedef R_nb_t::const_iterator R_nb_cit;
- typedef BipRegGraph::_edge_t R_edge_t;
+ BipartiteGraph G;
+ std::vector<FRegion> ORs;
+ std::vector<Region> IRs;
-
protected:
/// Give back the OR index that corresponds to a factor index
- typedef std::map<size_t,size_t>::const_iterator fac2OR_cit;
- std::map<size_t,size_t> _fac2OR;
+ std::vector<size_t> fac2OR;
public:
/// Default constructor
- RegionGraph() : FactorGraph(), BipRegGraph(), _fac2OR() {}
+ RegionGraph() : FactorGraph(), G(), ORs(), IRs(), fac2OR() {}
/// Constructs a RegionGraph from a FactorGraph
- RegionGraph(const FactorGraph & fg) : FactorGraph(fg), BipRegGraph(), _fac2OR() {}
+ RegionGraph( const FactorGraph &fg ) : FactorGraph(fg), G(), ORs(), IRs(), fac2OR() {}
/// Constructs a RegionGraph from a FactorGraph, a vector of outer regions, a vector of inner regions and a vector of edges
- RegionGraph(const FactorGraph & fg, const std::vector<Region> & ors, const std::vector<Region> & irs, const std::vector<R_edge_t> & edges);
+ RegionGraph( const FactorGraph &fg, const std::vector<Region> &ors, const std::vector<Region> &irs, const std::vector<std::pair<size_t,size_t> > &edges );
/// Constructs a RegionGraph from a FactorGraph and a vector of outer VarSets (CVM style)
- RegionGraph(const FactorGraph & fg, const std::vector<VarSet> & cl);
+ RegionGraph( const FactorGraph &fg, const std::vector<VarSet> &cl );
/// Copy constructor
- RegionGraph(const RegionGraph & x) : FactorGraph(x), BipRegGraph(x), _fac2OR(x._fac2OR) {}
+ RegionGraph( const RegionGraph &x ) : FactorGraph(x), G(x.G), ORs(x.ORs), IRs(x.IRs), fac2OR(x.fac2OR) {}
/// Assignment operator
- RegionGraph & operator=(const RegionGraph & x) {
+ RegionGraph & operator=( const RegionGraph &x ) {
if( this != &x ) {
- FactorGraph::operator=(x);
- BipRegGraph::operator=(x);
- _fac2OR = x._fac2OR;
+ FactorGraph::operator=( x );
+ G = x.G;
+ ORs = x.ORs;
+ IRs = x.IRs;
+ fac2OR = x.fac2OR;
}
return *this;
}
/// Provides read access to outer region
- const FRegion & OR(long alpha) const { return BipRegGraph::V1(alpha); }
+ const FRegion & OR(size_t alpha) const { return ORs[alpha]; }
/// Provides access to outer region
- FRegion & OR(long alpha) { return BipRegGraph::V1(alpha); }
- /// Provides read access to all outer regions
- const std::vector<FRegion> & ORs() const { return BipRegGraph::V1s(); }
- /// Provides access to all outer regions
- std::vector<FRegion> &ORs() { return BipRegGraph::V1s(); }
- /// Returns number of outer regions
- size_t nr_ORs() const { return BipRegGraph::V1s().size(); }
+ FRegion & OR(size_t alpha) { return ORs[alpha]; }
/// Provides read access to inner region
- const Region & IR(long beta) const { return BipRegGraph::V2(beta); }
+ const Region & IR(size_t beta) const { return IRs[beta]; }
/// Provides access to inner region
- Region & IR(long beta) { return BipRegGraph::V2(beta); }
- /// Provides read access to all inner regions
- const std::vector<Region> & IRs() const { return BipRegGraph::V2s(); }
- /// Provides access to all inner regions
- std::vector<Region> & IRs() { return BipRegGraph::V2s(); }
+ Region & IR(size_t beta) { return IRs[beta]; }
+
+ /// Returns number of outer regions
+ size_t nrORs() const { return ORs.size(); }
/// Returns number of inner regions
- size_t nr_IRs() const { return BipRegGraph::V2s().size(); }
-
- /// Provides read access to edge
- const R_edge_t & Redge(size_t ind) const { return BipRegGraph::edge(ind); }
- /// Provides full access to edge
- R_edge_t & Redge(size_t ind) { return BipRegGraph::edge(ind); }
- /// Provides read access to all edges
- const std::vector<R_edge_t> & Redges() const { return BipRegGraph::edges(); }
- /// Provides full access to all edges
- std::vector<R_edge_t> & Redges() { return BipRegGraph::edges(); }
- /// Returns number of edges
- size_t nr_Redges() const { return BipRegGraph::edges().size(); }
-
- /// Provides read access to neighbours of outer region
- const R_nb_t & nbOR( size_t i1 ) const { return BipRegGraph::nb1(i1); }
- /// Provides full access to neighbours of outer region
- R_nb_t & nbOR( size_t i1 ) { return BipRegGraph::nb1(i1); }
-
- /// Provides read access to neighbours of inner region
- const R_nb_t & nbIR( size_t i2 ) const { return BipRegGraph::nb2(i2); }
- /// Provides full access to neighbours of inner region
- R_nb_t & nbIR( size_t i2 ) { return BipRegGraph::nb2(i2); }
-
- /// Converts the pair of outer/inner region indices (i1,i2) to the corresponding edge index
- size_t ORIR2E( const size_t i1, const size_t i2 ) const { return BipRegGraph::VV2E(i1, i2); }
-
- void Regenerate() { BipRegGraph::Regenerate(); }
-
-
+ size_t nrIRs() const { return IRs.size(); }
+
+
+ /// Provides read access to neighbors of outer region
+ const Neighbors & nbOR( size_t alpha ) const { return G.nb1(alpha); }
+ /// Provides full access to neighbors of outer region
+ Neighbors & nbOR( size_t alpha ) { return G.nb1(alpha); }
+
+ /// Provides read access to neighbors of inner region
+ const Neighbors & nbIR( size_t beta ) const { return G.nb2(beta); }
+ /// Provides full access to neighbors of inner region
+ Neighbors & nbIR( size_t beta ) { return G.nb2(beta); }
+
+
/// Calculates counting numbers of inner regions based upon counting numbers of outer regions
void Calc_Counting_Numbers();
/// Check whether the counting numbers are valid
void clamp( const Var &n, size_t i ) { FactorGraph::clamp( n, i ); RecomputeORs( n ); }
/// We have to overload FactorGraph::makeCavity because the corresponding outer regions have to be recomputed
- void makeCavity( const Var &n ) { FactorGraph::makeCavity( n ); RecomputeORs( n ); }
-
- /// We have to overload FactorGraph::makeFactorCavity because the corresponding outer regions have to be recomputed
- void makeFactorCavity( size_t I ) { FactorGraph::makeFactorCavity( I ); RecomputeOR( I ); }
+ void makeCavity( size_t i ) { FactorGraph::makeCavity( i ); RecomputeORs( var(i) ); }
/// We have to overload FactorGraph::undoProbs because the corresponding outer regions have to be recomputed
void undoProbs( const VarSet &ns ) { FactorGraph::undoProbs( ns ); RecomputeORs( ns ); }
std::vector<Factor> _Qa;
std::vector<Factor> _Qb;
DEdgeVec _RTree;
- std::vector<size_t> _a; // _Qa[alpha] <-> superTree._Qa[_a[alpha]]
- std::vector<size_t> _b; // _Qb[beta] <-> superTree._Qb[_b[beta]]
+ std::vector<size_t> _a; // _Qa[alpha] <-> superTree._Qa[_a[alpha]]
+ std::vector<size_t> _b; // _Qb[beta] <-> superTree._Qb[_b[beta]]
// _Qb[beta] <-> _RTree[beta]
const Factor * _I;
VarSet _ns;
double run();
Complex logZ() const;
- bool offtree(size_t I) const { return !_fac2OR.count(I); }
+ bool offtree( size_t I ) const { return (fac2OR[I] == -1U); }
void init( const VarSet &/*ns*/ ) { init(); }
void undoProbs( const VarSet &ns ) { RegionGraph::undoProbs( ns ); init( ns ); }
#include <sys/times.h>
#include <cstdio>
+#include <boost/foreach.hpp>
+
+
+#define foreach BOOST_FOREACH
namespace dai {
InfAlg *newInfAlg( const string &name, const FactorGraph &fg, const Properties &opts ) {
+#ifdef WITH_BP
if( name == BP::Name )
return new BP (fg, opts);
+#endif
+#ifdef WITH_MF
else if( name == MF::Name )
return new MF (fg, opts);
+#endif
+#ifdef WITH_HAK
else if( name == HAK::Name )
return new HAK (fg, opts);
+#endif
+#ifdef WITH_LC
else if( name == LC::Name )
return new LC (fg, opts);
+#endif
+#ifdef WITH_TREEEP
else if( name == TreeEP::Name )
return new TreeEP (fg, opts);
- else if( name == MR::Name )
- return new MR (fg, opts);
+#endif
+#ifdef WITH_JTREE
else if( name == JTree::Name )
return new JTree (fg, opts);
+#endif
+#ifdef WITH_MR
+ else if( name == MR::Name )
+ return new MR (fg, opts);
+#endif
else
throw "Unknown inference algorithm";
}
void BP::Regenerate() {
- DAIAlgFG::Regenerate();
-
- // clear messages
- _messages.clear();
- _messages.reserve(nr_edges());
-
- // clear indices
- _indices.clear();
- _indices.reserve(nr_edges());
-
- // create messages and indices
- for( vector<_edge_t>::const_iterator iI=edges().begin(); iI!=edges().end(); ++iI ) {
- _messages.push_back( Prob( var(iI->first).states() ) );
-
- vector<size_t> ind( factor(iI->second).stateSpace(), 0 );
- Index i (var(iI->first), factor(iI->second).vars() );
- for( size_t j = 0; i >= 0; ++i,++j )
- ind[j] = i;
- _indices.push_back( ind );
+ // create edge properties
+ edges.clear();
+ edges.reserve( nrVars() );
+ for( size_t i = 0; i < nrVars(); ++i ) {
+ edges.push_back( vector<EdgeProp>() );
+ edges[i].reserve( nbV(i).size() );
+ foreach( const Neighbor &I, nbV(i) ) {
+ EdgeProp newEP;
+ newEP.message = Prob( var(i).states() );
+ newEP.newMessage = Prob( var(i).states() );
+
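+            // Precalculate the index table: newEP.index[k] is the state of var(i)
+            // that corresponds to joint state k of factor I (cf. calcNewMessage)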
+ newEP.index.reserve( factor(I).stateSpace() );
+ for( Index k( var(i), factor(I).vars() ); k >= 0; ++k )
+ newEP.index.push_back( k );
+
+ newEP.residual = 0.0;
+ edges[i].push_back( newEP );
+ }
}
-
- // create new_messages
- _newmessages = _messages;
}
void BP::init() {
assert( checkProperties() );
- for( vector<Prob>::iterator mij = _messages.begin(); mij != _messages.end(); ++mij )
- mij->fill(1.0 / mij->size());
- _newmessages = _messages;
+ for( size_t i = 0; i < nrVars(); ++i ) {
+ foreach( const Neighbor &I, nbV(i) ) {
+ message( i, I.iter ).fill( 1.0 );
+ newMessage( i, I.iter ).fill( 1.0 );
+ }
+ }
+}
+
+
+void BP::findMaxResidual( size_t &i, size_t &_I ) {
+ i = 0;
+ _I = 0;
+ double maxres = residual( i, _I );
+ for( size_t j = 0; j < nrVars(); ++j )
+ foreach( const Neighbor &I, nbV(j) )
+ if( residual( j, I.iter ) > maxres ) {
+ i = j;
+ _I = I.iter;
+ maxres = residual( i, _I );
+ }
}
-void BP::calcNewMessage (size_t iI) {
+void BP::calcNewMessage( size_t i, size_t _I ) {
// calculate updated message I->i
- size_t i = edge(iI).first;
- size_t I = edge(iI).second;
+ size_t I = nbV(i,_I);
/* UNOPTIMIZED (SIMPLE TO READ, BUT SLOW) VERSION
Prob prod( factor(I).p() );
// Calculate product of incoming messages and factor I
- for( _nb_cit j = nb2(I).begin(); j != nb2(I).end(); ++j )
- if( *j != i ) { // for all j in I \ i
+ foreach( const Neighbor &j, nbF(I) ) {
+ if( j != i ) { // for all j in I \ i
+ size_t _I = j.dual;
// ind is the precalculated Index(j,I) i.e. to x_I == k corresponds x_j == ind[k]
- _ind_t* ind = &(index(*j,I));
+ const ind_t & ind = index(j, _I);
// prod_j will be the product of messages coming into j
- Prob prod_j( var(*j).states() );
- for( _nb_cit J = nb1(*j).begin(); J != nb1(*j).end(); ++J )
- if( *J != I ) // for all J in nb(j) \ I
- prod_j *= message(*j,*J);
+ Prob prod_j( var(j).states() );
+ foreach( const Neighbor &J, nbV(j) ) {
+ if( J != I ) // for all J in nb(j) \ I
+ prod_j *= message( j, J.iter );
+ }
// multiply prod with prod_j
for( size_t r = 0; r < prod.size(); ++r )
- prod[r] *= prod_j[(*ind)[r]];
+ prod[r] *= prod_j[ind[r]];
}
+ }
// Marginalize onto i
Prob marg( var(i).states(), 0.0 );
// ind is the precalculated Index(i,I) i.e. to x_I == k corresponds x_i == ind[k]
- _ind_t* ind = &(index(i,I));
+    const ind_t & ind = index(i,_I);
for( size_t r = 0; r < prod.size(); ++r )
- marg[(*ind)[r]] += prod[r];
+ marg[ind[r]] += prod[r];
marg.normalize( _normtype );
// Store result
- _newmessages[iI] = marg;
+ newMessage(i,_I) = marg;
}
clock_t tic = toc();
Diffs diffs(nrVars(), 1.0);
- vector<size_t> edge_seq;
- vector<double> residuals;
+ typedef pair<size_t,size_t> Edge;
+ vector<Edge> update_seq;
vector<Factor> old_beliefs;
old_beliefs.reserve( nrVars() );
for( size_t i = 0; i < nrVars(); ++i )
- old_beliefs.push_back(belief1(i));
+ old_beliefs.push_back( beliefV(i) );
size_t iter = 0;
+ size_t nredges = nrEdges();
if( Updates() == UpdateType::SEQMAX ) {
// do the first pass
- for(size_t iI = 0; iI < nr_edges(); ++iI )
- calcNewMessage(iI);
-
- // calculate initial residuals
- residuals.reserve(nr_edges());
- for( size_t iI = 0; iI < nr_edges(); ++iI )
- residuals.push_back( dist( _newmessages[iI], _messages[iI], Prob::DISTLINF ) );
+ for( size_t i = 0; i < nrVars(); ++i )
+ foreach( const Neighbor &I, nbV(i) ) {
+ calcNewMessage( i, I.iter );
+ // calculate initial residuals
+ residual( i, I.iter ) = dist( newMessage( i, I.iter ), message( i, I.iter ), Prob::DISTLINF );
+ }
} else {
- edge_seq.reserve( nr_edges() );
- for( size_t i = 0; i < nr_edges(); ++i )
- edge_seq.push_back( i );
+ update_seq.reserve( nredges );
+ for( size_t i = 0; i < nrVars(); ++i )
+ foreach( const Neighbor &I, nbV(i) )
+ update_seq.push_back( Edge( i, I.iter ) );
}
// do several passes over the network until maximum number of iterations has
for( iter=0; iter < MaxIter() && diffs.max() > Tol(); ++iter ) {
if( Updates() == UpdateType::SEQMAX ) {
// Residuals-BP by Koller et al.
- for( size_t t = 0; t < nr_edges(); ++t ) {
+ for( size_t t = 0; t < nredges; ++t ) {
// update the message with the largest residual
- size_t iI = max_element(residuals.begin(), residuals.end()) - residuals.begin();
- _messages[iI] = _newmessages[iI];
- residuals[iI] = 0;
+
+ size_t i, _I;
+ findMaxResidual( i, _I );
+ message( i, _I ) = newMessage( i, _I );
+ residual( i, _I ) = 0.0;
// I->i has been updated, which means that residuals for all
// J->j with J in nb[i]\I and j in nb[J]\i have to be updated
- size_t i = edge(iI).first;
- size_t I = edge(iI).second;
- for( _nb_cit J = nb1(i).begin(); J != nb1(i).end(); ++J )
- if( *J != I )
- for( _nb_cit j = nb2(*J).begin(); j != nb2(*J).end(); ++j )
- if( *j != i ) {
- size_t jJ = VV2E(*j,*J);
- calcNewMessage(jJ);
- residuals[jJ] = dist( _newmessages[jJ], _messages[jJ], Prob::DISTLINF );
+ foreach( const Neighbor &J, nbV(i) ) {
+ if( J.iter != _I ) {
+ foreach( const Neighbor &j, nbF(J) ) {
+ size_t _J = j.dual;
+ if( j != i ) {
+ calcNewMessage( j, _J );
+ residual( j, _J ) = dist( newMessage( j, _J ), message( j, _J ), Prob::DISTLINF );
}
+ }
+ }
+ }
}
} else if( Updates() == UpdateType::PARALL ) {
// Parallel updates
- for( size_t t = 0; t < nr_edges(); ++t )
- calcNewMessage(t);
+ for( size_t i = 0; i < nrVars(); ++i )
+ foreach( const Neighbor &I, nbV(i) )
+ calcNewMessage( i, I.iter );
- for( size_t t = 0; t < nr_edges(); ++t )
- _messages[t] = _newmessages[t];
+ for( size_t i = 0; i < nrVars(); ++i )
+ foreach( const Neighbor &I, nbV(i) )
+ message( i, I.iter ) = newMessage( i, I.iter );
} else {
// Sequential updates
if( Updates() == UpdateType::SEQRND )
- random_shuffle( edge_seq.begin(), edge_seq.end() );
+ random_shuffle( update_seq.begin(), update_seq.end() );
- for( size_t t = 0; t < nr_edges(); ++t ) {
- size_t k = edge_seq[t];
- calcNewMessage(k);
- _messages[k] = _newmessages[k];
+ foreach( const Edge &e, update_seq ) {
+ calcNewMessage( e.first, e.second );
+ message( e.first, e.second ) = newMessage( e.first, e.second );
}
}
// calculate new beliefs and compare with old ones
for( size_t i = 0; i < nrVars(); ++i ) {
- Factor nb( belief1(i) );
+ Factor nb( beliefV(i) );
diffs.push( dist( nb, old_beliefs[i], Prob::DISTLINF ) );
old_beliefs[i] = nb;
}
}
-Factor BP::belief1( size_t i ) const {
+Factor BP::beliefV( size_t i ) const {
Prob prod( var(i).states() );
- for( _nb_cit I = nb1(i).begin(); I != nb1(i).end(); ++I )
- prod *= newMessage(i,*I);
+ foreach( const Neighbor &I, nbV(i) )
+ prod *= newMessage( i, I.iter );
prod.normalize( Prob::NORMPROB );
return( Factor( var(i), prod ) );
Factor BP::belief (const Var &n) const {
- return( belief1( findVar( n ) ) );
+ return( beliefV( findVar( n ) ) );
}
vector<Factor> BP::beliefs() const {
vector<Factor> result;
for( size_t i = 0; i < nrVars(); ++i )
- result.push_back( belief1(i) );
+ result.push_back( beliefV(i) );
for( size_t I = 0; I < nrFactors(); ++I )
- result.push_back( belief2(I) );
+ result.push_back( beliefF(I) );
return result;
}
if( factor(I).vars() >> ns )
break;
assert( I != nrFactors() );
- return belief2(I).marginal(ns);
+ return beliefF(I).marginal(ns);
}
}
-Factor BP::belief2 (size_t I) const {
+Factor BP::beliefF (size_t I) const {
Prob prod( factor(I).p() );
- for( _nb_cit j = nb2(I).begin(); j != nb2(I).end(); ++j ) {
+ foreach( const Neighbor &j, nbF(I) ) {
+ size_t _I = j.dual;
// ind is the precalculated Index(j,I) i.e. to x_I == k corresponds x_j == ind[k]
- const _ind_t *ind = &(index(*j, I));
+ const ind_t & ind = index(j, _I);
// prod_j will be the product of messages coming into j
- Prob prod_j( var(*j).states() );
- for( _nb_cit J = nb1(*j).begin(); J != nb1(*j).end(); ++J )
- if( *J != I ) // for all J in nb(j) \ I
- prod_j *= newMessage(*j,*J);
+ Prob prod_j( var(j).states() );
+ foreach( const Neighbor &J, nbV(j) ) {
+ if( J != I ) // for all J in nb(j) \ I
+ prod_j *= newMessage( j, J.iter );
+ }
// multiply prod with prod_j
for( size_t r = 0; r < prod.size(); ++r )
- prod[r] *= prod_j[(*ind)[r]];
+ prod[r] *= prod_j[ind[r]];
}
Factor result( factor(I).vars(), prod );
Complex BP::logZ() const {
Complex sum = 0.0;
for(size_t i = 0; i < nrVars(); ++i )
- sum += Complex(1.0 - nb1(i).size()) * belief1(i).entropy();
+ sum += Complex(1.0 - nbV(i).size()) * beliefV(i).entropy();
for( size_t I = 0; I < nrFactors(); ++I )
- sum -= KL_dist( belief2(I), factor(I) );
+ sum -= KL_dist( beliefF(I), factor(I) );
return sum;
}
void BP::init( const VarSet &ns ) {
for( VarSet::const_iterator n = ns.begin(); n != ns.end(); ++n ) {
size_t ni = findVar( *n );
- for( _nb_cit I = nb1(ni).begin(); I != nb1(ni).end(); ++I )
- message(ni,*I).fill( 1.0 );
+ foreach( const Neighbor &I, nbV( ni ) )
+ message( ni, I.iter ).fill( 1.0 );
}
}
#include <functional>
#include <tr1/unordered_map>
#include <dai/factorgraph.h>
+#include <dai/util.h>
namespace dai {
using namespace std;
-FactorGraph::FactorGraph( const vector<Factor> &P ) : BipartiteGraph<Var,Factor>(), _undoProbs(), _normtype(Prob::NORMPROB) {
+FactorGraph::FactorGraph( const vector<Factor> &P ) : G(), _undoProbs(), _normtype(Prob::NORMPROB) {
// add factors, obtain variables
set<Var> _vars;
- V2s().reserve( P.size() );
+ factors.reserve( P.size() );
size_t nrEdges = 0;
for( vector<Factor>::const_iterator p2 = P.begin(); p2 != P.end(); p2++ ) {
- V2s().push_back( *p2 );
- copy( p2->vars().begin(), p2->vars().end(), inserter( _vars, _vars.begin() ) );
- nrEdges += p2->vars().size();
+ factors.push_back( *p2 );
+ copy( p2->vars().begin(), p2->vars().end(), inserter( _vars, _vars.begin() ) );
+ nrEdges += p2->vars().size();
}
// add _vars
- V1s().reserve( _vars.size() );
+ vars.reserve( _vars.size() );
for( set<Var>::const_iterator p1 = _vars.begin(); p1 != _vars.end(); p1++ )
- V1s().push_back( *p1 );
+ vars.push_back( *p1 );
// create graph structure
createGraph( nrEdges );
// create a mapping for indices
std::tr1::unordered_map<size_t, size_t> hashmap;
- for( size_t i = 0; i < vars().size(); i++ )
- hashmap[vars()[i].label()] = i;
+ for( size_t i = 0; i < vars.size(); i++ )
+ hashmap[var(i).label()] = i;
- // create edges
- edges().reserve( nrEdges );
+ // create edge list
+ typedef pair<unsigned,unsigned> Edge;
+ vector<Edge> edges;
+ edges.reserve( nrEdges );
for( size_t i2 = 0; i2 < nrFactors(); i2++ ) {
const VarSet& ns = factor(i2).vars();
for( VarSet::const_iterator q = ns.begin(); q != ns.end(); q++ )
- edges().push_back(_edge_t(hashmap[q->label()], i2));
+ edges.push_back( Edge(hashmap[q->label()], i2) );
}
- // calc neighbours and adjacency matrix
- Regenerate();
+ // create bipartite graph
+ G.create( nrVars(), nrFactors(), edges.begin(), edges.end() );
}
-/*FactorGraph& FactorGraph::addFactor( const Factor &I ) {
- // add Factor
- _V2.push_back( I );
-
- // add new vars in Factor
- for( VarSet::const_iterator i = I.vars().begin(); i != I.vars().end(); i++ ) {
- size_t i_ind = find(vars().begin(), vars().end(), *i) - vars().begin();
- if( i_ind == vars().size() )
- _V1.push_back( *i );
- _E12.push_back( _edge_t( i_ind, nrFactors() - 1 ) );
- }
-
- Regenerate();
- return(*this);
-}*/
-
-
ostream& operator << (ostream& os, const FactorGraph& fg) {
os << fg.nrFactors() << endl;
}
-VarSet FactorGraph::delta(const Var & n) const {
+VarSet FactorGraph::delta( unsigned i ) const {
// calculate Markov Blanket
- size_t i = findVar( n );
-
VarSet del;
- for( _nb_cit I = nb1(i).begin(); I != nb1(i).end(); ++I )
- for( _nb_cit j = nb2(*I).begin(); j != nb2(*I).end(); ++j )
- if( *j != i )
- del |= var(*j);
+ foreach( const Neighbor &I, nbV(i) ) // for all neighboring factors I of i
+ foreach( const Neighbor &j, nbF(I) ) // for all neighboring variables j of I
+ if( j != i )
+ del |= var(j);
return del;
}
-VarSet FactorGraph::Delta(const Var & n) const {
- return( delta(n) | n );
+VarSet FactorGraph::Delta( unsigned i ) const {
+ return( delta(i) | var(i) );
}
-void FactorGraph::makeFactorCavity(size_t I) {
- // fill Factor I with ones
- factor(I).fill(1.0);
-}
-
-
-void FactorGraph::makeCavity(const Var & n) {
- // fills all Factors that include Var n with ones
- size_t i = findVar( n );
-
- for( _nb_cit I = nb1(i).begin(); I != nb1(i).end(); ++I )
- factor(*I).fill(1.0);
+void FactorGraph::makeCavity( unsigned i ) {
+ // fills all Factors that include var(i) with ones
+ foreach( const Neighbor &I, nbV(i) ) // for all neighboring factors I of i
+ factor(I).fill( 1.0 );
}
bool result = false;
for( size_t I = 0; I < nrFactors() && !result; I++ )
if( factor(I).hasNegatives() )
- result = true;
+ result = true;
return result;
}
-/*FactorGraph & FactorGraph::DeleteFactor(size_t I) {
- // Go through all edges
- for( vector<_edge_t>::iterator edge = _E12.begin(); edge != _E12.end(); edge++ )
- if( edge->second >= I ) {
- if( edge->second == I )
- edge->second = -1UL;
- else
- (edge->second)--;
- }
- // Remove all edges containing I
- for( vector<_edge_t>::iterator edge = _E12.begin(); edge != _E12.end(); edge++ )
- if( edge->second == -1UL )
- edge = _E12.erase( edge );
-// vector<_edge_t>::iterator new_end = _E12.remove_if( _E12.begin(), _E12.end(), compose1( bind2nd(equal_to<size_t>(), -1), select2nd<_edge_t>() ) );
-// _E12.erase( new_end, _E12.end() );
-
- // Erase the factor
- _V2.erase( _V2.begin() + I );
-
- Regenerate();
-
- return *this;
-}
-
-
-FactorGraph & FactorGraph::DeleteVar(size_t i) {
- // Go through all edges
- for( vector<_edge_t>::iterator edge = _E12.begin(); edge != _E12.end(); edge++ )
- if( edge->first >= i ) {
- if( edge->first == i )
- edge->first = -1UL;
- else
- (edge->first)--;
- }
- // Remove all edges containing i
- for( vector<_edge_t>::iterator edge = _E12.begin(); edge != _E12.end(); edge++ )
- if( edge->first == -1UL )
- edge = _E12.erase( edge );
-
-// vector<_edge_t>::iterator new_end = _E12.remove_if( _E12.begin(), _E12.end(), compose1( bind2nd(equal_to<size_t>(), -1), select1st<_edge_t>() ) );
-// _E12.erase( new_end, _E12.end() );
-
- // Erase the variable
- _V1.erase( _V1.begin() + i );
-
- Regenerate();
-
- return *this;
-}*/
-
-
long FactorGraph::ReadFromFile(const char *filename) {
ifstream infile;
infile.open (filename);
outfile << "node[shape=box,style=filled,color=lightgrey,width=0.3,height=0.3,fixedsize=true];" << endl;
for( size_t I = 0; I < nrFactors(); I++ )
outfile << "\tp" << I << ";" << endl;
- for( size_t iI = 0; iI < nr_edges(); iI++ )
- outfile << "\tx" << var(edge(iI).first).label() << " -- p" << edge(iI).second << ";" << endl;
+ for( size_t i = 0; i < nrVars(); i++ )
+ foreach( const Neighbor &I, nbV(i) ) // for all neighboring factors I of i
+ outfile << "\tx" << var(i).label() << " -- p" << I << ";" << endl;
outfile << "}" << endl;
} catch (char *e) {
cout << e << endl;
}
-Factor FactorGraph::ExactMarginal(const VarSet & x) const {
- Factor P;
- for( size_t I = 0; I < nrFactors(); I++ )
- P *= factor(I);
- return P.marginal(x);
-}
-
-
-Real FactorGraph::ExactlogZ() const {
- Factor P;
- for( size_t I = 0; I < nrFactors(); I++ )
- P *= factor(I);
- return std::log(P.totalSum());
-}
-
-
vector<VarSet> FactorGraph::Cliques() const {
vector<VarSet> result;
}
-bool FactorGraph::isConnected() const {
- if( nrVars() == 0 )
- return false;
- else {
- Var n = var( 0 );
-
- VarSet component = n;
-
- VarSet remaining;
- for( size_t i = 1; i < nrVars(); i++ )
- remaining |= var(i);
-
- bool found_new_vars = true;
- while( found_new_vars ) {
- VarSet new_vars;
- for( VarSet::const_iterator m = remaining.begin(); m != remaining.end(); m++ )
- if( delta(*m) && component )
- new_vars |= *m;
-
- if( new_vars.empty() )
- found_new_vars = false;
- else
- found_new_vars = true;
-
- component |= new_vars;
- remaining /= new_vars;
- };
- return remaining.empty();
- }
-}
-
-
} // end of namespace dai
void HAK::constructMessages() {
// Create outer beliefs
_Qa.clear();
- _Qa.reserve(nr_ORs());
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ )
+ _Qa.reserve(nrORs());
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
_Qa.push_back( Factor( OR(alpha).vars() ) );
// Create inner beliefs
_Qb.clear();
- _Qb.reserve(nr_IRs());
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
+ _Qb.reserve(nrIRs());
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
_Qb.push_back( Factor( IR(beta) ) );
// Create messages
_muab.clear();
- _muab.reserve(nr_Redges());
+ _muab.reserve( nrORs() );
_muba.clear();
- _muba.reserve(nr_Redges());
- for( vector<R_edge_t>::const_iterator ab = Redges().begin(); ab != Redges().end(); ab++ ) {
- _muab.push_back( Factor( IR(ab->second) ) );
- _muba.push_back( Factor( IR(ab->second) ) );
+ _muba.reserve( nrORs() );
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ ) {
+ _muab.push_back( vector<Factor>() );
+ _muba.push_back( vector<Factor>() );
+ _muab[alpha].reserve( nbOR(alpha).size() );
+ _muba[alpha].reserve( nbOR(alpha).size() );
+ foreach( const Neighbor &beta, nbOR(alpha) ) {
+ _muab[alpha].push_back( Factor( IR(beta) ) );
+ _muba[alpha].push_back( Factor( IR(beta) ) );
+ }
}
}
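The message containers above are no longer indexed by a global edge number: each outer region keeps one small vector of messages, addressed by the position of the inner region in nbOR(alpha) (the beta.iter field). A minimal, self-contained sketch of that storage pattern, using plain STL containers and toy indices rather than the libDAI types:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        // neighbor lists of 3 outer regions; entries are inner-region indices
        std::vector<std::vector<std::size_t> > nbOR(3);
        nbOR[0].push_back(0); nbOR[0].push_back(1);
        nbOR[1].push_back(1);
        nbOR[2].push_back(0); nbOR[2].push_back(2);

        // one "message" (a plain double here, a Factor in the real code) per
        // (outer region, neighbor position) pair -- cf. _muab[alpha][beta.iter]
        std::vector<std::vector<double> > muab(nbOR.size());
        for (std::size_t alpha = 0; alpha < nbOR.size(); ++alpha)
            muab[alpha].assign(nbOR[alpha].size(), 1.0);   // start from uniform messages

        // a message is addressed by the neighbor's position in the list, not by an edge id
        std::cout << "message from outer region 2 to its 2nd neighbor: "
                  << muab[2][1] << std::endl;
        return 0;
    }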
void HAK::findLoopClusters( const FactorGraph & fg, set<VarSet> &allcl, VarSet newcl, const Var & root, size_t length, VarSet vars ) {
for( VarSet::const_iterator in = vars.begin(); in != vars.end(); in++ ) {
- VarSet ind = fg.delta( *in );
+ VarSet ind = fg.delta( fg.findVar( *in ) );
if( (newcl.size()) >= 2 && (ind >> root) ) {
allcl.insert( newcl | *in );
}
cl = fg.Cliques();
} else if( Clusters() == ClustersType::DELTA ) {
for( size_t i = 0; i < fg.nrVars(); i++ )
- cl.push_back(fg.Delta(fg.var(i)));
+ cl.push_back(fg.Delta(i));
} else if( Clusters() == ClustersType::LOOP ) {
cl = fg.Cliques();
set<VarSet> scl;
- for( vector<Var>::const_iterator i0 = fg.vars().begin(); i0 != fg.vars().end(); i0++ ) {
- VarSet i0d = fg.delta(*i0);
+ for( size_t i0 = 0; i0 < fg.nrVars(); i0++ ) {
+ VarSet i0d = fg.delta(i0);
if( LoopDepth() > 1 )
- findLoopClusters( fg, scl, *i0, *i0, LoopDepth() - 1, fg.delta(*i0) );
+ findLoopClusters( fg, scl, fg.var(i0), fg.var(i0), LoopDepth() - 1, fg.delta(i0) );
}
for( set<VarSet>::const_iterator c = scl.begin(); c != scl.end(); c++ )
cl.push_back(*c);
if( alpha->vars() && ns )
alpha->fill( 1.0 / alpha->stateSpace() );
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
if( IR(beta) && ns ) {
_Qb[beta].fill( 1.0 / IR(beta).stateSpace() );
- for( R_nb_cit alpha = nbIR(beta).begin(); alpha != nbIR(beta).end(); alpha++ ) {
- muab(*alpha,beta).fill( 1.0 / IR(beta).stateSpace() );
- muba(beta,*alpha).fill( 1.0 / IR(beta).stateSpace() );
+ foreach( const Neighbor &alpha, nbIR(beta) ) {
+ size_t _beta = alpha.dual;
+ muab( alpha, _beta ).fill( 1.0 / IR(beta).stateSpace() );
+ muba( alpha, _beta ).fill( 1.0 / IR(beta).stateSpace() );
}
}
}
for( vector<Factor>::iterator beta = _Qb.begin(); beta != _Qb.end(); beta++ )
beta->fill( 1.0 / beta->stateSpace() );
- for( size_t ab = 0; ab < nr_Redges(); ab++ ) {
- _muab[ab].fill( 1.0 / _muab[ab].stateSpace() );
- _muba[ab].fill( 1.0 / _muba[ab].stateSpace() );
- }
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
+ foreach( const Neighbor &beta, nbOR(alpha) ) {
+ size_t _beta = beta.iter;
+ muab( alpha, _beta ).fill( 1.0 / muab( alpha, _beta ).stateSpace() );
+ muba( alpha, _beta ).fill( 1.0 / muba( alpha, _beta ).stateSpace() );
+ }
}
clock_t tic = toc();
// Check whether counting numbers won't lead to problems
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
assert( nbIR(beta).size() + IR(beta).c() != 0.0 );
// Keep old beliefs to check convergence
// do several passes over the network until the maximum number of iterations has
// been reached or until the maximum belief difference is smaller than the tolerance
for( iter = 0; iter < MaxIter() && diffs.max() > Tol(); iter++ ) {
- for( size_t beta = 0; beta < nr_IRs(); beta++ ) {
- for( R_nb_cit alpha = nbIR(beta).begin(); alpha != nbIR(beta).end(); alpha++ )
- muab(*alpha,beta) = _Qa[*alpha].marginal(IR(beta)).divided_by( muba(beta,*alpha) );
+ for( size_t beta = 0; beta < nrIRs(); beta++ ) {
+ foreach( const Neighbor &alpha, nbIR(beta) ) {
+ size_t _beta = alpha.dual;
+ muab( alpha, _beta ) = _Qa[alpha].marginal(IR(beta)).divided_by( muba(alpha,_beta) );
+ }
Factor Qb_new;
- for( R_nb_cit alpha = nbIR(beta).begin(); alpha != nbIR(beta).end(); alpha++ )
- Qb_new *= muab(*alpha,beta) ^ (1 / (nbIR(beta).size() + IR(beta).c()));
+ foreach( const Neighbor &alpha, nbIR(beta) ) {
+ size_t _beta = alpha.dual;
+ Qb_new *= muab(alpha,_beta) ^ (1 / (nbIR(beta).size() + IR(beta).c()));
+ }
+
Qb_new.normalize( _normtype );
if( Qb_new.hasNaNs() ) {
cout << "HAK::doGBP: Qb_new has NaNs!" << endl;
// _Qb[beta] = Qb_new.makeZero(1e-100); // damping?
_Qb[beta] = Qb_new;
- for( R_nb_cit alpha = nbIR(beta).begin(); alpha != nbIR(beta).end(); alpha++ ) {
- muba(beta,*alpha) = _Qb[beta].divided_by( muab(*alpha,beta) );
+ foreach( const Neighbor &alpha, nbIR(beta) ) {
+ size_t _beta = alpha.dual;
+
+ muba(alpha,_beta) = _Qb[beta].divided_by( muab(alpha,_beta) );
- Factor Qa_new = OR(*alpha);
- for( R_nb_cit gamma = nbOR(*alpha).begin(); gamma != nbOR(*alpha).end(); gamma++ )
- Qa_new *= muba(*gamma,*alpha);
- Qa_new ^= (1.0 / OR(*alpha).c());
+ Factor Qa_new = OR(alpha);
+ foreach( const Neighbor &gamma, nbOR(alpha) )
+ Qa_new *= muba(alpha,gamma.iter);
+ Qa_new ^= (1.0 / OR(alpha).c());
Qa_new.normalize( _normtype );
if( Qa_new.hasNaNs() ) {
cout << "HAK::doGBP: Qa_new has NaNs!" << endl;
return NAN;
}
-// _Qa[*alpha] = Qa_new.makeZero(1e-100); // damping?
- _Qa[*alpha] = Qa_new;
+// _Qa[alpha] = Qa_new.makeZero(1e-100); // damping?
+ _Qa[alpha] = Qa_new;
}
}
clock_t tic = toc();
// Save original outer regions
- vector<FRegion> org_ORs = ORs();
+ vector<FRegion> org_ORs = ORs;
// Save original inner counting numbers and set negative counting numbers to zero
- vector<double> org_IR_cs( nr_IRs(), 0.0 );
- for( size_t beta = 0; beta < nr_IRs(); beta++ ) {
+ vector<double> org_IR_cs( nrIRs(), 0.0 );
+ for( size_t beta = 0; beta < nrIRs(); beta++ ) {
org_IR_cs[beta] = IR(beta).c();
if( IR(beta).c() < 0.0 )
IR(beta).c() = 0.0;
// Differences in single node beliefs
Diffs diffs(nrVars(), 1.0);
- size_t outer_maxiter = MaxIter();
+ size_t outer_maxiter = MaxIter();
double outer_tol = Tol();
- size_t outer_verbose = Verbose();
+ size_t outer_verbose = Verbose();
double org_maxdiff = MaxDiff();
// Set parameters for inner loop
size_t outer_iter = 0;
for( outer_iter = 0; outer_iter < outer_maxiter && diffs.max() > outer_tol; outer_iter++ ) {
// Calculate new outer regions
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ ) {
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ ) {
OR(alpha) = org_ORs[alpha];
- for( R_nb_cit beta = nbOR(alpha).begin(); beta != nbOR(alpha).end(); beta++ )
- OR(alpha) *= _Qb[*beta] ^ ((IR(*beta).c() - org_IR_cs[*beta]) / nbIR(*beta).size());
+ foreach( const Neighbor &beta, nbOR(alpha) )
+ OR(alpha) *= _Qb[beta] ^ ((IR(beta).c() - org_IR_cs[beta]) / nbIR(beta).size());
}
// Inner loop
updateMaxDiff( diffs.max() );
// Restore original outer regions
- ORs() = org_ORs;
+ ORs = org_ORs;
// Restore original inner counting numbers
- for( size_t beta = 0; beta < nr_IRs(); ++beta )
+ for( size_t beta = 0; beta < nrIRs(); ++beta )
IR(beta).c() = org_IR_cs[beta];
if( Verbose() >= 1 ) {
vector<Factor> HAK::beliefs() const {
vector<Factor> result;
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
result.push_back( Qb(beta) );
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
result.push_back( Qa(alpha) );
return result;
}
Complex HAK::logZ() const {
Complex sum = 0.0;
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
sum += Complex(IR(beta).c()) * Qb(beta).entropy();
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ ) {
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ ) {
sum += Complex(OR(alpha).c()) * Qa(alpha).entropy();
sum += (OR(alpha).log0() * Qa(alpha)).totalSum();
}
}
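For orientation, the two loops in HAK::logZ() above accumulate the region-based estimate (a sketch inferred from the loop bodies, with c the counting numbers, H the entropy, and \psi_\alpha the outer-region factor stored in OR(alpha)):

    \log Z \approx \sum_\beta c_\beta H(Q_\beta) + \sum_\alpha \Big( c_\alpha H(Q_\alpha) + \sum_{x_\alpha} Q_\alpha(x_\alpha) \log \psi_\alpha(x_\alpha) \Big)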
-JTree::JTree( const FactorGraph &fg, const Properties &opts, bool automatic) : DAIAlgRG(fg, opts), _RTree(), _Qa(), _Qb(), _mes(), _logZ() {
+JTree::JTree( const FactorGraph &fg, const Properties &opts, bool automatic ) : DAIAlgRG(fg, opts), _RTree(), _Qa(), _Qb(), _mes(), _logZ() {
assert( checkProperties() );
if( automatic ) {
// Construct corresponding region graph
// Create outer regions
- ORs().reserve( Cliques.size() );
+ ORs.reserve( Cliques.size() );
for( size_t i = 0; i < Cliques.size(); i++ )
- ORs().push_back( FRegion( Factor(Cliques[i], 1.0), 1.0 ) );
+ ORs.push_back( FRegion( Factor(Cliques[i], 1.0), 1.0 ) );
// For each factor, find an outer region that subsumes that factor.
// Then, multiply the outer region with that factor.
for( size_t I = 0; I < nrFactors(); I++ ) {
size_t alpha;
- for( alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( alpha = 0; alpha < nrORs(); alpha++ )
if( OR(alpha).vars() >> factor(I).vars() ) {
// OR(alpha) *= factor(I);
- _fac2OR[I] = alpha;
+ fac2OR.push_back( alpha );
break;
}
- assert( alpha != nr_ORs() );
+ assert( alpha != nrORs() );
}
RecomputeORs();
// Create inner regions and edges
- IRs().reserve( _RTree.size() );
- Redges().reserve( 2 * _RTree.size() );
+ IRs.reserve( _RTree.size() );
+ typedef pair<size_t,size_t> Edge;
+ vector<Edge> edges;
+ edges.reserve( 2 * _RTree.size() );
for( size_t i = 0; i < _RTree.size(); i++ ) {
- Redges().push_back( R_edge_t( _RTree[i].n1, IRs().size() ) );
- Redges().push_back( R_edge_t( _RTree[i].n2, IRs().size() ) );
+ edges.push_back( Edge( _RTree[i].n1, nrIRs() ) );
+ edges.push_back( Edge( _RTree[i].n2, nrIRs() ) );
// inner clusters have counting number -1
- IRs().push_back( Region( Cliques[_RTree[i].n1] & Cliques[_RTree[i].n2], -1.0 ) );
+ IRs.push_back( Region( Cliques[_RTree[i].n1] & Cliques[_RTree[i].n2], -1.0 ) );
}
- // Regenerate BipartiteGraph internals
- Regenerate();
+ // create bipartite graph
+ G.create( nrORs(), nrIRs(), edges.begin(), edges.end() );
// Create messages and beliefs
_Qa.clear();
- _Qa.reserve( nr_ORs() );
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ )
+ _Qa.reserve( nrORs() );
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
_Qa.push_back( OR(alpha) );
_Qb.clear();
- _Qb.reserve( nr_IRs() );
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
+ _Qb.reserve( nrIRs() );
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
_Qb.push_back( Factor( IR(beta), 1.0 ) );
_mes.clear();
- _mes.reserve( nr_Redges() );
- for( size_t e = 0; e < nr_Redges(); e++ )
- _mes.push_back( Factor( IR(Redge(e).second), 1.0 ) );
+ _mes.reserve( nrORs() );
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ ) {
+ _mes.push_back( vector<Factor>() );
+ _mes[alpha].reserve( nbOR(alpha).size() );
+ foreach( const Neighbor &beta, nbOR(alpha) )
+ _mes[alpha].push_back( Factor( IR(beta), 1.0 ) );
+ }
// Check counting numbers
Check_Counting_Numbers();
vector<Factor> JTree::beliefs() const {
vector<Factor> result;
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
result.push_back( _Qb[beta] );
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
result.push_back( _Qa[alpha] );
return result;
}
// Needs no init
void JTree::runHUGIN() {
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
_Qa[alpha] = OR(alpha);
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
_Qb[beta].fill( 1.0 );
// CollectEvidence
}
// Normalize
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
_Qa[alpha].normalize( Prob::NORMPROB );
}
void JTree::runShaferShenoy() {
// First pass
_logZ = 0.0;
- for( size_t e = _RTree.size(); (e--) != 0; ) {
+ for( size_t e = nrIRs(); (e--) != 0; ) {
// send a message from _RTree[e].n2 to _RTree[e].n1
// or, actually, from the separator IR(e) to _RTree[e].n1
- size_t i = _RTree[e].n2;
- size_t j = _RTree[e].n1;
+ size_t i = nbIR(e)[1].node; // = _RTree[e].n2
+ size_t j = nbIR(e)[0].node; // = _RTree[e].n1
+ size_t _e = nbIR(e)[0].dual;
Factor piet = OR(i);
- for( R_nb_cit k = nbOR(i).begin(); k != nbOR(i).end(); k++ )
- if( *k != e )
- piet *= message( i, *k );
- message( j, e ) = piet.part_sum( IR(e) );
- _logZ += log( message(j,e).normalize( Prob::NORMPROB ) );
+ foreach( const Neighbor &k, nbOR(i) )
+ if( k != e )
+ piet *= message( i, k.iter );
+ message( j, _e ) = piet.part_sum( IR(e) );
+ _logZ += log( message(j,_e).normalize( Prob::NORMPROB ) );
}
// Second pass
- for( size_t e = 0; e < _RTree.size(); e++ ) {
- size_t i = _RTree[e].n1;
- size_t j = _RTree[e].n2;
+ for( size_t e = 0; e < nrIRs(); e++ ) {
+ size_t i = nbIR(e)[0].node; // = _RTree[e].n1
+ size_t j = nbIR(e)[1].node; // = _RTree[e].n2
+ size_t _e = nbIR(e)[1].dual;
Factor piet = OR(i);
- for( R_nb_cit k = nbOR(i).begin(); k != nbOR(i).end(); k++ )
- if( *k != e )
- piet *= message( i, *k );
- message( j, e ) = piet.marginal( IR(e) );
+ foreach( const Neighbor &k, nbOR(i) )
+ if( k != e )
+ piet *= message( i, k.iter );
+ message( j, _e ) = piet.marginal( IR(e) );
}
// Calculate beliefs
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ ) {
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ ) {
Factor piet = OR(alpha);
- for( R_nb_cit k = nbOR(alpha).begin(); k != nbOR(alpha).end(); k++ )
- piet *= message( alpha, *k );
- if( _RTree.empty() ) {
+ foreach( const Neighbor &k, nbOR(alpha) )
+ piet *= message( alpha, k.iter );
+ if( nrIRs() == 0 ) {
_logZ += log( piet.normalize( Prob::NORMPROB ) );
_Qa[alpha] = piet;
- } else if( alpha == _RTree[0].n1 ) {
+ } else if( alpha == nbIR(0)[0].node /*_RTree[0].n1*/ ) {
_logZ += log( piet.normalize( Prob::NORMPROB ) );
_Qa[alpha] = piet;
} else
}
// Only for logZ (and for belief)...
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
- _Qb[beta] = _Qa[nbIR(beta)[0]].marginal( IR(beta) );
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
+ _Qb[beta] = _Qa[nbIR(beta)[0].node].marginal( IR(beta) );
}
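Both passes above rely on three fields of the Neighbor objects returned by nbIR() and nbOR(). The struct below is only an illustrative sketch of that convention (the field names are the ones used in the code; the exact layout is an assumption, not copied from the headers):

    #include <cstddef>

    // Sketch of the Neighbor convention behind nbIR(e)[0].node / .dual and k.iter above.
    struct Neighbor {
        std::size_t iter;   // position of this neighbor in the owner's neighbor list
        std::size_t node;   // index of the neighboring node itself
        std::size_t dual;   // position of the owner in that neighbor's own list
        operator std::size_t() const { return node; }  // lets `k != e` compare node indices
    };

With this convention, nbIR(e)[0].node is the first outer region adjacent to separator e, and nbIR(e)[0].dual is where e sits inside that outer region's neighbor list, which is exactly the second index expected by message( j, _e ).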
Complex JTree::logZ() const {
Complex sum = 0.0;
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
sum += Complex(IR(beta).c()) * _Qb[beta].entropy();
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ ) {
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ ) {
sum += Complex(OR(alpha).c()) * _Qa[alpha].entropy();
sum += (OR(alpha).log0() * _Qa[alpha]).totalSum();
}
size_t JTree::findEfficientTree( const VarSet& ns, DEdgeVec &Tree, size_t PreviousRoot ) const {
// find new root clique (the one with maximal statespace overlap with ns)
size_t maxval = 0, maxalpha = 0;
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ ) {
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ ) {
size_t val = (ns & OR(alpha).vars()).stateSpace();
if( val > maxval ) {
maxval = val;
size_t alpha1 = T[i].n1;
size_t alpha2 = T[i].n2;
size_t beta;
- for( beta = 0; beta < nr_IRs(); beta++ )
+ for( beta = 0; beta < nrIRs(); beta++ )
if( UEdge( _RTree[beta].n1, _RTree[beta].n2 ) == UEdge( alpha1, alpha2 ) )
break;
- assert( beta != nr_IRs() );
+ assert( beta != nrIRs() );
b[i] = beta;
if( !_Qa_old.count( alpha1 ) )
LC::LC(const FactorGraph & fg, const Properties &opts) : DAIAlgFG(fg, opts) {
assert( checkProperties() );
- // calc iI
- for( size_t i=0; i < nrVars(); i++ ) {
- for( _nb_cit I = nb1(i).begin(); I != nb1(i).end(); I++ ) {
- _iI_type _iI_entry;
- _iI_entry.i = i;
- _iI_entry.I = *I;
-
- _iI.push_back(_iI_entry);
- }
- }
-
// create pancakes
_pancakes.resize(nrVars());
// create cavitydists
for( size_t i=0; i < nrVars(); i++ )
- _cavitydists.push_back(Factor(delta(var(i))));
+ _cavitydists.push_back(Factor(delta(i)));
// create phis
- _phis.reserve(nr_edges());
- for( size_t iI = 0; iI < nr_edges(); iI++ ) {
- size_t i = edge(iI).first;
- size_t I = edge(iI).second;
- _phis.push_back( Factor( factor(I).vars() / var(i) ) );
+ _phis.reserve( nrVars() );
+ for( size_t i = 0; i < nrVars(); i++ ) {
+ _phis.push_back( vector<Factor>() );
+ _phis[i].reserve( nbV(i).size() );
+ foreach( const Neighbor &I, nbV(i) )
+ _phis[i].push_back( Factor( factor(I).vars() / var(i) ) );
}
// create beliefs
double maxdiff = 0;
if( Verbose() >= 2 )
- cout << "Initing cavity " << var(i) << "(" << delta(var(i)).size() << " vars, " << delta(var(i)).stateSpace() << " states)" << endl;
+ cout << "Initing cavity " << var(i) << "(" << delta(i).size() << " vars, " << delta(i).stateSpace() << " states)" << endl;
if( Cavity() == CavityType::UNIFORM )
- Bi = Factor(delta(var(i)));
+ Bi = Factor(delta(i));
else {
InfAlg *cav = newInfAlg( name, *this, opts );
- cav->makeCavity( var(i) );
+ cav->makeCavity( i );
if( Cavity() == CavityType::FULL )
- Bi = calcMarginal( *cav, cav->delta(var(i)), reInit() );
+ Bi = calcMarginal( *cav, cav->delta(i), reInit() );
else if( Cavity() == CavityType::PAIR )
- Bi = calcMarginal2ndO( *cav, cav->delta(var(i)), reInit() );
+ Bi = calcMarginal2ndO( *cav, cav->delta(i), reInit() );
else if( Cavity() == CavityType::PAIR2 ) {
- vector<Factor> pairbeliefs = calcPairBeliefsNew( *cav, cav->delta(var(i)), reInit() );
+ vector<Factor> pairbeliefs = calcPairBeliefsNew( *cav, cav->delta(i), reInit() );
for( size_t ij = 0; ij < pairbeliefs.size(); ij++ )
Bi *= pairbeliefs[ij];
} else if( Cavity() == CavityType::PAIRINT ) {
- Bi = calcMarginal( *cav, cav->delta(var(i)), reInit() );
+ Bi = calcMarginal( *cav, cav->delta(i), reInit() );
// Set interactions of order > 2 to zero
- size_t N = delta(var(i)).size();
+ size_t N = delta(i).size();
Real *p = &(*Bi.p().p().begin());
x2x::p2logp (N, p);
x2x::logp2w (N, p);
// x2x::logpnorm (N, p);
x2x::logp2p (N, p);
} else if( Cavity() == CavityType::PAIRCUM ) {
- Bi = calcMarginal( *cav, cav->delta(var(i)), reInit() );
+ Bi = calcMarginal( *cav, cav->delta(i), reInit() );
// Set cumulants of order > 2 to zero
- size_t N = delta(var(i)).size();
+ size_t N = delta(i).size();
Real *p = &(*Bi.p().p().begin());
x2x::p2m (N, p);
x2x::m2c (N, p, N);
void LC::init() {
- for( size_t iI = 0; iI < nr_edges(); iI++ ) {
- if( Updates() == UpdateType::SEQRND )
- _phis[iI].randomize();
- else
- _phis[iI].fill(1.0);
- }
+ for( size_t i = 0; i < nrVars(); ++i )
+ foreach( const Neighbor &I, nbV(i) )
+ if( Updates() == UpdateType::SEQRND )
+ _phis[i][I.iter].randomize();
+ else
+ _phis[i][I.iter].fill(1.0);
for( size_t i = 0; i < nrVars(); i++ ) {
_pancakes[i] = _cavitydists[i];
- for( _nb_cit I = nb1(i).begin(); I != nb1(i).end(); I++ ) {
- _pancakes[i] *= factor(*I);
+ foreach( const Neighbor &I, nbV(i) ) {
+ _pancakes[i] *= factor(I);
if( Updates() == UpdateType::SEQRND )
- _pancakes[i] *= _phis[VV2E(i,*I)];
+ _pancakes[i] *= _phis[i][I.iter];
}
_pancakes[i].normalize( _normtype );
}
-Factor LC::NewPancake (size_t iI, bool & hasNaNs) {
- size_t i = _iI[iI].i;
- size_t I = _iI[iI].I;
- iI = VV2E(i, I);
-
+Factor LC::NewPancake (size_t i, size_t _I, bool & hasNaNs) {
+ size_t I = nbV(i)[_I];
Factor piet = _pancakes[i];
// recalculate _pancake[i]
A_I *= (_pancakes[findVar(*k)] * factor(I).inverse()).part_sum( Ivars / var(i) );
if( Ivars.size() > 1 )
A_I ^= (1.0 / (Ivars.size() - 1));
- Factor A_Ii = (_pancakes[i] * factor(I).inverse() * _phis[iI].inverse()).part_sum( Ivars / var(i) );
+ Factor A_Ii = (_pancakes[i] * factor(I).inverse() * _phis[i][_I].inverse()).part_sum( Ivars / var(i) );
Factor quot = A_I.divided_by(A_Ii);
- piet *= quot.divided_by( _phis[iI] ).normalized( _normtype );
- _phis[iI] = quot.normalized( _normtype );
+ piet *= quot.divided_by( _phis[i][_I] ).normalized( _normtype );
+ _phis[i][_I] = quot.normalized( _normtype );
piet.normalize( _normtype );
if( piet.hasNaNs() ) {
- cout << "LC::NewPancake(" << iI << "): has NaNs!" << endl;
+ cout << "LC::NewPancake(" << i << ", " << _I << "): has NaNs!" << endl;
hasNaNs = true;
}
return NAN;
}
- vector<long> update_seq(nr_iI(),0);
- for( size_t k=0; k < nr_iI(); k++ )
- update_seq[k] = k;
+ size_t nredges = nrEdges();
+ typedef pair<size_t,size_t> Edge;
+ vector<Edge> update_seq;
+ update_seq.reserve( nredges );
+ for( size_t i = 0; i < nrVars(); ++i )
+ foreach( const Neighbor &I, nbV(i) )
+ update_seq.push_back( Edge( i, I.iter ) );
- size_t iter=0;
+ size_t iter = 0;
// do several passes over the network until the maximum number of iterations has
// been reached or until the maximum belief difference is smaller than the tolerance
if( Updates() == UpdateType::SEQRND )
random_shuffle( update_seq.begin(), update_seq.end() );
- for( size_t t=0; t < nr_iI(); t++ ) {
- long iI = update_seq[t];
- long i = _iI[iI].i;
- _pancakes[i] = NewPancake(iI, hasNaNs);
+ for( size_t t=0; t < nredges; t++ ) {
+ size_t i = update_seq[t].first;
+ size_t _I = update_seq[t].second;
+ _pancakes[i] = NewPancake( i, _I, hasNaNs);
if( hasNaNs )
return NAN;
- CalcBelief(i);
+ CalcBelief( i );
}
// compare new beliefs with old ones
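The schedule above replaces the old flat edge index by explicit (variable, neighbor-position) pairs, shuffled per pass when Updates()==SEQRND. A self-contained toy version of that schedule (plain STL; the degrees are made up for illustration):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main() {
        // toy factor graph: number of neighboring factors per variable
        std::vector<std::size_t> degree;
        degree.push_back(2); degree.push_back(3); degree.push_back(1);

        typedef std::pair<std::size_t, std::size_t> Edge;   // (i, position of I in nbV(i))
        std::vector<Edge> update_seq;
        for (std::size_t i = 0; i < degree.size(); ++i)
            for (std::size_t _I = 0; _I < degree[i]; ++_I)
                update_seq.push_back(Edge(i, _I));

        std::random_shuffle(update_seq.begin(), update_seq.end());  // SEQRND-style ordering

        for (std::size_t t = 0; t < update_seq.size(); ++t)
            std::cout << "update pancake of variable " << update_seq[t].first
                      << " using its neighbor #" << update_seq[t].second << std::endl;
        return 0;
    }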
void MF::Regenerate() {
- DAIAlgFG::Regenerate();
+// DAIAlgFG::Regenerate();
// clear beliefs
_beliefs.clear();
_beliefs.reserve( nrVars() );
// create beliefs
- for( vector<Var>::const_iterator i = vars().begin(); i != vars().end(); i++ )
- _beliefs.push_back(Factor(*i));
+ for( size_t i = 0; i < nrVars(); ++i )
+ _beliefs.push_back(Factor(var(i)));
}
Factor jan;
Factor piet;
- for( _nb_cit I = nb1(i).begin(); I != nb1(i).end(); I++ ) {
-
+ foreach( const Neighbor &I, nbV(i) ) {
Factor henk;
- for( _nb_cit j = nb2(*I).begin(); j != nb2(*I).end(); j++ ) // for all j in I \ i
- if( *j != i )
- henk *= _beliefs[*j];
- piet = factor(*I).log0();
+ foreach( const Neighbor &j, nbF(I) ) // for all j in I \ i
+ if( j != i )
+ henk *= _beliefs[j];
+ piet = factor(I).log0();
piet *= henk;
piet = piet.part_sum(var(i));
piet = piet.exp();
jan *= piet;
}
-
+
jan.normalize( _normtype );
if( jan.hasNaNs() ) {
}
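Written out, the loop above implements the usual mean-field fixed-point update (a sketch inferred from the code, with b_j the single-node beliefs stored in _beliefs and \psi_I the factors):

    b_i^{\mathrm{new}}(x_i) \propto \prod_{I \ni i} \exp\Big( \sum_{x_{I \setminus i}} \Big[ \prod_{j \in I \setminus i} b_j(x_j) \Big] \log \psi_I(x_I) \Big)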
-Factor MF::belief1 (size_t i) const {
+Factor MF::beliefV (size_t i) const {
Factor piet;
piet = _beliefs[i];
piet.normalize( Prob::NORMPROB );
Factor MF::belief (const Var &n) const {
- return( belief1( findVar( n) ) );
+ return( beliefV( findVar( n ) ) );
}
vector<Factor> MF::beliefs() const {
vector<Factor> result;
for( size_t i = 0; i < nrVars(); i++ )
- result.push_back( belief1(i) );
+ result.push_back( beliefV(i) );
return result;
}
Complex sum = 0.0;
for(size_t i=0; i < nrVars(); i++ )
- sum -= belief1(i).entropy();
+ sum -= beliefV(i).entropy();
for(size_t I=0; I < nrFactors(); I++ ) {
Factor henk;
- for( _nb_cit j = nb2(I).begin(); j != nb2(I).end(); j++ ) // for all j in I
- henk *= _beliefs[*j];
+ foreach( const Neighbor &j, nbF(I) ) // for all j in I
+ henk *= _beliefs[j];
henk.normalize( Prob::NORMPROB );
Factor piet;
piet = factor(I).log0();
vector<Factor> pairq;
if( Inits() == InitType::CLAMPING ) {
BP bpcav(*this, Properties()("updates",string("SEQMAX"))("tol", string("1e-9"))("maxiter", string("1000UL"))("verbose", string("0UL")));
- bpcav.makeCavity( var(i) );
- pairq = calcPairBeliefs( bpcav, delta(var(i)), false );
+ bpcav.makeCavity( i );
+ pairq = calcPairBeliefs( bpcav, delta(i), false );
} else if( Inits() == InitType::EXACT ) {
JTree jtcav(*this, Properties()("updates",string("HUGIN"))("verbose", string("0UL")) );
- jtcav.makeCavity( var(i) );
- pairq = calcPairBeliefs( jtcav, delta(var(i)), false );
+ jtcav.makeCavity( i );
+ pairq = calcPairBeliefs( jtcav, delta(i), false );
}
for( size_t jk = 0; jk < pairq.size(); jk++ ) {
VarSet::const_iterator kit = pairq[jk].vars().begin();
// check whether all vars in fg are binary
// check whether connectivity is <= kmax
for( size_t i = 0; i < fg.nrVars(); i++ )
- if( (fg.var(i).states() > 2) || (fg.delta(fg.var(i)).size() > kmax) ) {
+ if( (fg.var(i).states() > 2) || (fg.delta(i).size() > kmax) ) {
supported = false;
break;
}
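Restated as a stand-alone predicate, the test above only admits factor graphs whose variables are all binary and whose Markov blankets contain at most kmax variables. The helper below is a hedged, self-contained sketch over plain vectors (mrIsApplicable, states and deltaSize are illustrative names, not part of the library):

    #include <cstddef>
    #include <vector>

    // Toy restatement of the check above: binary variables and connectivity <= kmax.
    bool mrIsApplicable(const std::vector<std::size_t> &states,     // states per variable
                        const std::vector<std::size_t> &deltaSize,  // Markov blanket sizes
                        std::size_t kmax) {
        for (std::size_t i = 0; i < states.size(); ++i)
            if (states[i] > 2 || deltaSize[i] > kmax)
                return false;
        return true;
    }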
os << boost::any_cast<bool>(p.second);
else if( p.second.type() == typeid(Properties) )
os << boost::any_cast<Properties>(p.second);
+#ifdef WITH_BP
else if( p.second.type() == typeid(BP::UpdateType) )
os << boost::any_cast<BP::UpdateType>(p.second);
+#endif
+#ifdef WITH_HAK
else if( p.second.type() == typeid(HAK::ClustersType) )
os << boost::any_cast<HAK::ClustersType>(p.second);
+#endif
+#ifdef WITH_JTREE
else if( p.second.type() == typeid(JTree::UpdateType) )
os << boost::any_cast<JTree::UpdateType>(p.second);
+#endif
+#ifdef WITH_MR
else if( p.second.type() == typeid(MR::UpdateType) )
os << boost::any_cast<MR::UpdateType>(p.second);
else if( p.second.type() == typeid(MR::InitType) )
os << boost::any_cast<MR::InitType>(p.second);
+#endif
+#ifdef WITH_TREEEP
else if( p.second.type() == typeid(TreeEP::TypeType) )
os << boost::any_cast<TreeEP::TypeType>(p.second);
+#endif
+#ifdef WITH_LC
else if( p.second.type() == typeid(LC::CavityType) )
os << boost::any_cast<LC::CavityType>(p.second);
else if( p.second.type() == typeid(LC::UpdateType) )
os << boost::any_cast<LC::UpdateType>(p.second);
+#endif
else
throw "Unknown property type";
return( os );
using namespace std;
-RegionGraph::RegionGraph(const FactorGraph & fg, const vector<Region> & ors, const vector<Region> & irs, const vector<R_edge_t> & edges) : FactorGraph(fg), BipRegGraph(), _fac2OR() {
+RegionGraph::RegionGraph( const FactorGraph &fg, const vector<Region> &ors, const vector<Region> &irs, const vector<pair<size_t,size_t> > &edges) : FactorGraph(fg), G(), ORs(), IRs(irs), fac2OR() {
// Copy outer regions (giving them counting number 1.0)
- ORs().reserve( ors.size() );
+ ORs.reserve( ors.size() );
for( vector<Region>::const_iterator alpha = ors.begin(); alpha != ors.end(); alpha++ )
- ORs().push_back( FRegion(Factor(*alpha, 1.0), 1.0) );
-
- // Incorporate factors into outer regions
-/* if( !_hasNegatives ) {
- // For each factor, find the outer regions that subsume that factor.
- // Then, "divide" the factor over its subsuming outer regions.
- for( size_t I = 0; I < nrFactors(); I++ ) {
- vector<size_t> subsuming_alphas;
-
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ ) {
- if( OR(alpha).vars() >> factor(I).vars() )
- subsuming_alphas.push_back( alpha );
- }
-
- assert( subsuming_alphas.size() >= 1 );
+ ORs.push_back( FRegion(Factor(*alpha, 1.0), 1.0) );
- for( vector<size_t>::const_iterator alpha = subsuming_alphas.begin(); alpha != subsuming_alphas.end(); alpha++ )
- OR(*alpha) *= (factor(I) ^ (1.0 / subsuming_alphas.size()));
- }
- } else {
- cout << "Careful composition of outer regions" << endl;
-*/
// For each factor, find an outer region that subsumes that factor.
// Then, multiply the outer region with that factor.
+ fac2OR.reserve( nrFactors() );
for( size_t I = 0; I < nrFactors(); I++ ) {
size_t alpha;
- for( alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( alpha = 0; alpha < nrORs(); alpha++ )
if( OR(alpha).vars() >> factor(I).vars() ) {
- _fac2OR[I] = alpha;
+ fac2OR.push_back( alpha );
// OR(alpha) *= factor(I);
break;
}
- assert( alpha != nr_ORs() );
+ assert( alpha != nrORs() );
}
RecomputeORs();
-// }
-
- // Copy inner regions
- IRs().reserve( irs.size() );
- for( vector<Region>::const_iterator beta = irs.begin(); beta != irs.end(); beta++ )
- IRs().push_back( *beta );
- // Copy edges
- Redges().reserve( edges.size() );
- for( vector<R_edge_t>::const_iterator e = edges.begin(); e != edges.end(); e++ )
- Redges().push_back( *e );
-
- // Regenerate BipartiteGraph internals
- BipRegGraph::Regenerate();
+ // create bipartite graph
+ G.create( nrORs(), nrIRs(), edges.begin(), edges.end() );
// Check counting numbers
+#ifdef DAI_DEBUG
Check_Counting_Numbers();
+#endif
}
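The call G.create( nrORs(), nrIRs(), edges.begin(), edges.end() ) replaces the old Redges()/Regenerate() bookkeeping: a flat list of (outer, inner) pairs becomes per-node neighbor lists. A self-contained sketch of that idea with plain STL containers (an illustration of the construction, not the BipartiteGraph implementation):

    #include <cstddef>
    #include <utility>
    #include <vector>

    int main() {
        typedef std::pair<std::size_t, std::size_t> Edge;   // (outer region, inner region)
        std::vector<Edge> edges;
        edges.push_back(Edge(0, 0)); edges.push_back(Edge(1, 0)); edges.push_back(Edge(1, 1));

        std::size_t nrORs = 2, nrIRs = 2;
        std::vector<std::vector<std::size_t> > nbOR(nrORs), nbIR(nrIRs);
        for (std::size_t e = 0; e < edges.size(); ++e) {
            nbOR[edges[e].first].push_back(edges[e].second);  // inner neighbors of an outer region
            nbIR[edges[e].second].push_back(edges[e].first);  // outer neighbors of an inner region
        }
        return 0;
    }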
// CVM style
-RegionGraph::RegionGraph(const FactorGraph & fg, const vector<VarSet> & cl) : FactorGraph(fg), BipRegGraph() {
+RegionGraph::RegionGraph( const FactorGraph &fg, const vector<VarSet> &cl ) : FactorGraph(fg), G(), ORs(), IRs(), fac2OR() {
// Retain only maximal clusters
ClusterGraph cg( cl );
cg.eraseNonMaximal();
// Create outer regions, giving them counting number 1.0
- ORs().reserve( cg.size() );
+ ORs.reserve( cg.size() );
for( ClusterGraph::const_iterator alpha = cg.begin(); alpha != cg.end(); alpha++ )
- ORs().push_back( FRegion(Factor(*alpha, 1.0), 1.0) );
-
- // Incorporate factors into outer regions
-/* if( !_hasNegatives ) {
- // For each factor, find the outer regions that subsume that factor.
- // Then, "divide" the factor over its subsuming outer regions.
- for( size_t I = 0; I < nrFactors(); I++ ) {
- vector<size_t> subsuming_alphas;
-
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ ) {
- if( OR(alpha).vars() >> factor(I).vars() )
- subsuming_alphas.push_back( alpha );
- }
-
- assert( subsuming_alphas.size() >= 1 );
+ ORs.push_back( FRegion(Factor(*alpha, 1.0), 1.0) );
- for( vector<size_t>::const_iterator alpha = subsuming_alphas.begin(); alpha != subsuming_alphas.end(); alpha++ )
- OR(*alpha) *= factor(I) ^ (1.0 / subsuming_alphas.size());
- }
- } else {
- cout << "Careful composition of outer regions" << endl;
-*/
// For each factor, find an outer region that subsumes that factor.
// Then, multiply the outer region with that factor.
+ fac2OR.reserve( nrFactors() );
for( size_t I = 0; I < nrFactors(); I++ ) {
size_t alpha;
- for( alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( alpha = 0; alpha < nrORs(); alpha++ )
if( OR(alpha).vars() >> factor(I).vars() ) {
- _fac2OR[I] = alpha;
+ fac2OR.push_back( alpha );
// OR(alpha) *= factor(I);
break;
}
- assert( alpha != nr_ORs() );
+ assert( alpha != nrORs() );
}
RecomputeORs();
-// }
// Create inner regions - first pass
set<VarSet> betas;
} while( new_betas.size() );
// Create inner regions - store them in the bipartite graph
- IRs().reserve( betas.size() );
+ IRs.reserve( betas.size() );
for( set<VarSet>::const_iterator beta = betas.begin(); beta != betas.end(); beta++ )
- IRs().push_back( Region(*beta,NAN) );
+ IRs.push_back( Region(*beta,NAN) );
// Create edges
- for( size_t beta = 0; beta < nr_IRs(); beta++ ) {
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ ) {
+ vector<pair<size_t,size_t> > edges;
+ for( size_t beta = 0; beta < nrIRs(); beta++ ) {
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ ) {
if( OR(alpha).vars() >> IR(beta) )
- Redges().push_back(R_edge_t(alpha,beta));
+ edges.push_back( pair<size_t,size_t>(alpha,beta) );
}
}
- // Regenerate BipartiteGraph internals
- BipRegGraph::Regenerate();
+ // create bipartite graph
+ G.create( nrORs(), nrIRs(), edges.begin(), edges.end() );
// Calculate counting numbers
Calc_Counting_Numbers();
// Check counting numbers
+#ifdef DAI_DEBUG
Check_Counting_Numbers();
+#endif
}
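The Calc_Counting_Numbers() routine below fixes the inner-region counting numbers top-down, given the outer-region numbers set in the constructors above (1.0 here). A sketch of the recursion it implements, and of the consistency test performed by Check_Counting_Numbers():

    c_\beta = 1 - \sum_{\alpha \supseteq \beta} c_\alpha - \sum_{\beta' \supsetneq \beta} c_{\beta'},
    \qquad \text{and for every variable } n:\ \sum_{\alpha \ni n} c_\alpha + \sum_{\beta \ni n} c_\beta = 1.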
void RegionGraph::Calc_Counting_Numbers() {
// Calculates counting numbers of inner regions based upon counting numbers of outer regions
- vector<vector<size_t> > ancestors(nr_IRs());
- for( size_t beta = 0; beta < nr_IRs(); beta++ ) {
+ vector<vector<size_t> > ancestors(nrIRs());
+ for( size_t beta = 0; beta < nrIRs(); beta++ ) {
IR(beta).c() = NAN;
- for( size_t beta2 = 0; beta2 < nr_IRs(); beta2++ )
+ for( size_t beta2 = 0; beta2 < nrIRs(); beta2++ )
if( (beta2 != beta) && IR(beta2) >> IR(beta) )
ancestors[beta].push_back(beta2);
}
bool new_counting;
do {
new_counting = false;
- for( size_t beta = 0; beta < nr_IRs(); beta++ ) {
+ for( size_t beta = 0; beta < nrIRs(); beta++ ) {
if( isnan( IR(beta).c() ) ) {
bool has_nan_ancestor = false;
for( vector<size_t>::const_iterator beta2 = ancestors[beta].begin(); (beta2 != ancestors[beta].end()) && !has_nan_ancestor; beta2++ )
                    if( isnan( IR(*beta2).c() ) )
                        has_nan_ancestor = true;
if( !has_nan_ancestor ) {
double c = 1.0;
- for( R_nb_cit alpha = nbIR(beta).begin(); alpha != nbIR(beta).end(); alpha++ )
- c -= OR(*alpha).c();
+ foreach( const Neighbor &alpha, nbIR(beta) )
+ c -= OR(alpha).c();
for( vector<size_t>::const_iterator beta2 = ancestors[beta].begin(); beta2 != ancestors[beta].end(); beta2++ )
c -= IR(*beta2).c();
IR(beta).c() = c;
// Checks whether the counting numbers satisfy the fundamental relation
bool all_valid = true;
- for( vector<Var>::const_iterator n = vars().begin(); n != vars().end(); n++ ) {
+ for( vector<Var>::const_iterator n = vars.begin(); n != vars.end(); n++ ) {
double c_n = 0.0;
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
if( OR(alpha).vars() && *n )
c_n += OR(alpha).c();
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
if( IR(beta) && *n )
c_n += IR(beta).c();
if( fabs(c_n - 1.0) > 1e-15 ) {
void RegionGraph::RecomputeORs() {
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
OR(alpha).fill( 1.0 );
- for( fac2OR_cit I = _fac2OR.begin(); I != _fac2OR.end(); I++ )
- OR( I->second ) *= factor( I->first );
+ for( size_t I = 0; I < nrFactors(); I++ )
+ if( fac2OR[I] != -1U )
+ OR( fac2OR[I] ) *= factor( I );
}
void RegionGraph::RecomputeORs( const VarSet &ns ) {
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
if( OR(alpha).vars() && ns )
OR(alpha).fill( 1.0 );
- for( fac2OR_cit I = _fac2OR.begin(); I != _fac2OR.end(); I++ )
- if( OR( I->second ).vars() && ns )
- OR( I->second ) *= factor( I->first );
+ for( size_t I = 0; I < nrFactors(); I++ )
+ if( fac2OR[I] != -1U )
+ if( OR( fac2OR[I] ).vars() && ns )
+ OR( fac2OR[I] ) *= factor( I );
}
void RegionGraph::RecomputeOR( size_t I ) {
- if( _fac2OR.count(I) ) {
- size_t alpha = _fac2OR[I];
+ assert( I < nrFactors() );
+ if( fac2OR[I] != -1U ) {
+ size_t alpha = fac2OR[I];
OR(alpha).fill( 1.0 );
- for( fac2OR_cit I = _fac2OR.begin(); I != _fac2OR.end(); I++ )
- if( I->second == alpha )
- OR(alpha) *= factor( I->first );
+ for( size_t J = 0; J < nrFactors(); J++ )
+ if( fac2OR[J] == alpha )
+ OR(alpha) *= factor( J );
}
}
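fac2OR is now a plain vector mapping each factor index to its outer region, with -1U marking factors that were not absorbed into any outer region (the off-tree factors in TreeEP). A small self-contained illustration of that bookkeeping:

    #include <cstddef>
    #include <vector>

    int main() {
        std::size_t nrFactors = 4;
        std::vector<std::size_t> fac2OR(nrFactors, -1U);   // start with every factor unassigned
        fac2OR[0] = 0; fac2OR[1] = 0; fac2OR[2] = 1;       // factor 3 stays off-tree

        std::size_t assigned = 0;
        for (std::size_t I = 0; I < nrFactors; ++I)
            if (fac2OR[I] != -1U)
                ++assigned;   // here RecomputeORs() would multiply factor I into OR(fac2OR[I])
        return 0;
    }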
ostream & operator << (ostream & os, const RegionGraph & rg) {
os << "Outer regions" << endl;
- for( size_t alpha = 0; alpha < rg.nr_ORs(); alpha++ )
+ for( size_t alpha = 0; alpha < rg.nrORs(); alpha++ )
os << rg.OR(alpha).vars() << ": c = " << rg.OR(alpha).c() << endl;
os << "Inner regions" << endl;
- for( size_t beta = 0; beta < rg.nr_IRs(); beta++ )
+ for( size_t beta = 0; beta < rg.nrIRs(); beta++ )
os << (VarSet)rg.IR(beta) << ": c = " << rg.IR(beta).c() << endl;
return(os);
TreeEP::TreeEP( const FactorGraph &fg, const Properties &opts ) : JTree(fg, opts("updates",string("HUGIN")), false) {
assert( checkProperties() );
- assert( fg.isConnected() );
+ assert( fg.G.isConnected() );
if( opts.hasKey("tree") ) {
ConstructRG( opts.GetAs<DEdgeVec>("tree") );
// construct weighted graph with as weights a crude estimate of the
// mutual information between the nodes
WeightedGraph<double> wg;
- for( vector<Var>::const_iterator i = vars().begin(); i != vars().end(); i++ ) {
- VarSet di = delta(*i);
+ for( size_t i = 0; i < nrVars(); ++i ) {
+ Var v_i = var(i);
+ VarSet di = delta(i);
for( VarSet::const_iterator j = di.begin(); j != di.end(); j++ )
- if( *i < *j ) {
+ if( v_i < *j ) {
Factor piet;
for( size_t I = 0; I < nrFactors(); I++ ) {
VarSet Ivars = factor(I).vars();
- if( (Ivars == *i) || (Ivars == *j) )
+ if( (Ivars == v_i) || (Ivars == *j) )
piet *= factor(I);
- else if( Ivars >> (*i | *j) )
- piet *= factor(I).marginal( *i | *j );
+ else if( Ivars >> (v_i | *j) )
+ piet *= factor(I).marginal( v_i | *j );
}
- if( piet.vars() >> (*i | *j) ) {
- piet = piet.marginal( *i | *j );
- Factor pietf = piet.marginal(*i) * piet.marginal(*j);
- wg[UEdge(findVar(*i),findVar(*j))] = real( KL_dist( piet, pietf ) );
+ if( piet.vars() >> (v_i | *j) ) {
+ piet = piet.marginal( v_i | *j );
+ Factor pietf = piet.marginal(v_i) * piet.marginal(*j);
+ wg[UEdge(i,findVar(*j))] = real( KL_dist( piet, pietf ) );
} else
- wg[UEdge(findVar(*i),findVar(*j))] = 0;
+ wg[UEdge(i,findVar(*j))] = 0;
}
}
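Up to the crude local approximation of the pair marginal built in the inner loop, the weight assigned to an edge above is the mutual information between the two variables,

    w_{ij} = \mathrm{KL}\big( p(x_i, x_j) \,\|\, p(x_i)\, p(x_j) \big) = I(x_i ; x_j),

which is what KL_dist( piet, pietf ) evaluates once piet has been reduced to the pair marginal.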
// construct weighted graph with as weights an upper bound on the
// effective interaction strength between pairs of nodes
WeightedGraph<double> wg;
- for( vector<Var>::const_iterator i = vars().begin(); i != vars().end(); i++ ) {
- VarSet di = delta(*i);
+ for( size_t i = 0; i < nrVars(); ++i ) {
+ Var v_i = var(i);
+ VarSet di = delta(i);
for( VarSet::const_iterator j = di.begin(); j != di.end(); j++ )
- if( *i < *j ) {
+ if( v_i < *j ) {
Factor piet;
for( size_t I = 0; I < nrFactors(); I++ ) {
VarSet Ivars = factor(I).vars();
- if( Ivars >> (*i | *j) )
+ if( Ivars >> (v_i | *j) )
piet *= factor(I);
}
- wg[UEdge(findVar(*i),findVar(*j))] = piet.strength(*i, *j);
+ wg[UEdge(i,findVar(*j))] = piet.strength(v_i, *j);
}
}
// Construct corresponding region graph
// Create outer regions
- ORs().reserve( Cliques.size() );
+ ORs.reserve( Cliques.size() );
for( size_t i = 0; i < Cliques.size(); i++ )
- ORs().push_back( FRegion( Factor(Cliques[i], 1.0), 1.0 ) );
+ ORs.push_back( FRegion( Factor(Cliques[i], 1.0), 1.0 ) );
// For each factor, find an outer region that subsumes that factor.
// Then, multiply the outer region with that factor.
// If no outer region can be found subsuming that factor, label the
// factor as off-tree.
+ fac2OR.clear();
+ fac2OR.resize( nrFactors(), -1U );
for( size_t I = 0; I < nrFactors(); I++ ) {
size_t alpha;
- for( alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( alpha = 0; alpha < nrORs(); alpha++ )
if( OR(alpha).vars() >> factor(I).vars() ) {
- _fac2OR[I] = alpha;
+ fac2OR[I] = alpha;
break;
}
// DIFF WITH JTree::GenerateJT: assert
RecomputeORs();
// Create inner regions and edges
- IRs().reserve( _RTree.size() );
- Redges().reserve( 2 * _RTree.size() );
+ IRs.reserve( _RTree.size() );
+ typedef pair<size_t,size_t> Edge;
+ vector<Edge> edges;
+ edges.reserve( 2 * _RTree.size() );
for( size_t i = 0; i < _RTree.size(); i++ ) {
- Redges().push_back( R_edge_t( _RTree[i].n1, IRs().size() ) );
- Redges().push_back( R_edge_t( _RTree[i].n2, IRs().size() ) );
+ edges.push_back( Edge( _RTree[i].n1, IRs.size() ) );
+ edges.push_back( Edge( _RTree[i].n2, IRs.size() ) );
// inner clusters have counting number -1
- IRs().push_back( Region( Cliques[_RTree[i].n1] & Cliques[_RTree[i].n2], -1.0 ) );
+ IRs.push_back( Region( Cliques[_RTree[i].n1] & Cliques[_RTree[i].n2], -1.0 ) );
}
- // Regenerate BipartiteGraph internals
- Regenerate();
+ // create bipartite graph
+ G.create( nrORs(), nrIRs(), edges.begin(), edges.end() );
// Check counting numbers
Check_Counting_Numbers();
// Create messages and beliefs
_Qa.clear();
- _Qa.reserve( nr_ORs() );
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ )
+ _Qa.reserve( nrORs() );
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
_Qa.push_back( OR(alpha) );
_Qb.clear();
- _Qb.reserve( nr_IRs() );
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
+ _Qb.reserve( nrIRs() );
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
_Qb.push_back( Factor( IR(beta), 1.0 ) );
// DIFF with JTree::GenerateJT: no messages
double sum = 0.0;
// entropy of the tree
- for( size_t beta = 0; beta < nr_IRs(); beta++ )
+ for( size_t beta = 0; beta < nrIRs(); beta++ )
sum -= real(_Qb[beta].entropy());
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
sum += real(_Qa[alpha].entropy());
// energy of the on-tree factors
- for( size_t alpha = 0; alpha < nr_ORs(); alpha++ )
+ for( size_t alpha = 0; alpha < nrORs(); alpha++ )
sum += (OR(alpha).log0() * _Qa[alpha]).totalSum();
// energy of the off-tree factors
throw "Please specify all required arguments";
do {
MakeHOIFG( N, M, k, beta, fg );
- } while( !fg.isConnected() );
+ } while( !fg.G.isConnected() );
cout << "# N = " << N << endl;
cout << "# M = " << M << endl;
cerr << "Error reading file " << infile << endl;
return 2;
} else {
- if( string( argv[2] ) == "-" )
+ if( string( argv[2] ) != "-" )
fg.WriteToDotFile( argv[2] );
else {
cout << "graph G {" << endl;
cout << "node[shape=box,style=filled,color=lightgrey,width=0.3,height=0.3,fixedsize=true];" << endl;
for( size_t I = 0; I < fg.nrFactors(); I++ )
cout << "\tp" << I << ";" << endl;
- for( size_t iI = 0; iI < fg.nr_edges(); iI++ )
- cout << "\tx" << fg.var(fg.edge(iI).first).label() << " -- p" << fg.edge(iI).second << ";" << endl;
+ for( size_t i = 0; i < fg.nrVars(); i++ )
+ foreach( const FactorGraph::Neighbor &I, fg.nbV(i) )
+ cout << "\tx" << fg.var(i).label() << " -- p" << I << ";" << endl;
cout << "}" << endl;
}
} else {
cout << "Number of variables: " << fg.nrVars() << endl;
cout << "Number of factors: " << fg.nrFactors() << endl;
- cout << "Connected: " << fg.isConnected() << endl;
+ cout << "Connected: " << fg.G.isConnected() << endl;
// cout << "Treewidth: " << endl;
double cavsum_lcbp = 0.0;
double cavsum_lcbp2 = 0.0;
size_t max_Delta_size = 0;
for( size_t i = 0; i < fg.nrVars(); i++ ) {
- VarSet di = fg.delta(fg.var(i));
- size_t Ds = fg.Delta(fg.var(i)).stateSpace();
+ VarSet di = fg.delta(i);
+ size_t Ds = fg.Delta(i).stateSpace();
if( Ds > max_Delta_size )
max_Delta_size = Ds;
cavsum_lcbp += di.stateSpace();