git master
----------
+* [Benjamin Piwowarski] Renamed the "foreach" macro to "bforeach" to avoid conflicts with newer Boost library versions (see the migration sketch below)
* Optimized ClusterGraph( const FactorGraph&, bool ) constructor
* Fixed "typename" bug in src/alldai.cpp that caused FTBFS with older g++ compilers
* Fixed memory leak in alldai.cpp and removed the function builtinInfAlgs()
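For client code, the rename is a one-token substitution: include <dai/util.h> and replace "foreach" with "bforeach". A minimal, hypothetical sketch (printNeighbors is illustrative and not part of libDAI):

    #include <iostream>
    #include <dai/util.h>      // defines the bforeach alias for BOOST_FOREACH
    #include <dai/bipgraph.h>

    using namespace dai;

    // Illustrative helper: print all edges of a bipartite graph
    void printNeighbors( const BipartiteGraph& G ) {
        for( size_t n1 = 0; n1 < G.nrNodes1(); n1++ )
            bforeach( const Neighbor& n2, G.nb1(n1) )  // was: foreach( ... )
                std::cout << "x" << n1 << " -- y" << n2.node << std::endl;
    }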
for( size_t n1 = 0; n1 < G.nrNodes1(); n1++ ) {
cout << "Node " << n1 << " of type 1 has " << G.nb1(n1).size() << " neighbors:" << endl;
// Iterate over all neighbors n2 of n1
- foreach( const Neighbor &n2, G.nb1(n1) ) {
+ bforeach( const Neighbor &n2, G.nb1(n1) ) {
// The n2.iter'th neighbor of n1 is n2:
DAI_ASSERT( G.nb1(n1)[n2.iter] == n2 );
for( size_t n1 = 0; n1 < nrNodes1(); n1++ ) {
if( nb1(n1).size() != x.nb1(n1).size() )
return false;
- foreach( const Neighbor &n2, nb1(n1) )
+ bforeach( const Neighbor &n2, nb1(n1) )
if( !x.hasEdge( n1, n2 ) )
return false;
- foreach( const Neighbor &n2, x.nb1(n1) )
+ bforeach( const Neighbor &n2, x.nb1(n1) )
if( !hasEdge( n1, n2 ) )
return false;
}
/// Returns union of clusters that contain the \a i 'th variable
VarSet Delta( size_t i ) const {
VarSet result;
- foreach( const Neighbor& I, _G.nb1(i) )
+ bforeach( const Neighbor& I, _G.nb1(i) )
result |= _clusters[I];
return result;
}
if( i1 == i2 )
return false;
bool result = false;
- foreach( const Neighbor& I, _G.nb1(i1) )
+ bforeach( const Neighbor& I, _G.nb1(i1) )
if( find( _G.nb2(I).begin(), _G.nb2(I).end(), i2 ) != _G.nb2(I).end() ) {
result = true;
break;
const VarSet & clI = _clusters[I];
bool maximal = true;
// The following may not be optimal, since it may repeatedly test the same cluster *J
- foreach( const Neighbor& i, _G.nb2(I) ) {
- foreach( const Neighbor& J, _G.nb1(i) )
+ bforeach( const Neighbor& i, _G.nb2(I) ) {
+ bforeach( const Neighbor& J, _G.nb1(i) )
if( (J != I) && (clI << _clusters[J]) ) {
maximal = false;
break;
for( size_t n1 = 0; n1 < nrNodes(); n1++ ) {
if( pa(n1).size() != x.pa(n1).size() )
return false;
- foreach( const Neighbor &n2, pa(n1) )
+ bforeach( const Neighbor &n2, pa(n1) )
if( !x.hasEdge( n2, n1 ) )
return false;
- foreach( const Neighbor &n2, x.pa(n1) )
+ bforeach( const Neighbor &n2, x.pa(n1) )
if( !hasEdge( n2, n1 ) )
return false;
}
* \code
* for( size_t i = 0; i < nrNodes(); ++i ) {
* size_t _j = 0;
- * foreach( const Neighbor &j, nb(i) ) {
+ * bforeach( const Neighbor &j, nb(i) ) {
* assert( j == nb(i,j.iter) );
* assert( nb(j.node,j.dual).node == i );
* assert( _j++ == j.iter );
for( size_t n1 = 0; n1 < nrNodes(); n1++ ) {
if( nb(n1).size() != x.nb(n1).size() )
return false;
- foreach( const Neighbor &n2, nb(n1) )
+ bforeach( const Neighbor &n2, nb(n1) )
if( !x.hasEdge( n1, n2 ) )
return false;
- foreach( const Neighbor &n2, x.nb(n1) )
+ bforeach( const Neighbor &n2, x.nb(n1) )
if( !hasEdge( n1, n2 ) )
return false;
}
/// Set properties according to \a newProps, overriding properties that already exist with new values
PropertySet& set( const PropertySet& newProps ) {
const std::map<PropertyKey, PropertyValue> *m = &newProps;
- foreach(value_type i, *m)
+ bforeach(value_type i, *m)
set( i.first, i.second );
return *this;
}
#endif
-/// An alias to the BOOST_FOREACH macro from the boost::foreach library
-#define foreach BOOST_FOREACH
+/// An alias to the BOOST_FOREACH macro from the boost::foreach library
+#define bforeach BOOST_FOREACH
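Because bforeach is a plain alias for BOOST_FOREACH, it accepts any sequence that Boost.Foreach supports. A minimal sketch, assuming Boost and the libDAI headers are on the include path:

    #include <iostream>
    #include <vector>
    #include <dai/util.h>

    int main() {
        std::vector<int> v;
        v.push_back( 1 );
        v.push_back( 2 );
        bforeach( int x, v )   // expands to BOOST_FOREACH( int x, v )
            std::cout << x << std::endl;
        return 0;
    }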
#ifdef DAI_DEBUG
/// \brief "Print variable". Prints the text of an expression, followed by its value (only if DAI_DEBUG is defined)
#include <set>
#include <limits>
#include <climits> // Work-around for bug in boost graph library
-#include <dai/util.h>
-#include <dai/exceptions.h>
-#include <dai/graph.h>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/prim_minimum_spanning_tree.hpp>
#include <boost/graph/kruskal_min_spanning_tree.hpp>
+#include <dai/util.h>
+#include <dai/exceptions.h>
+#include <dai/graph.h>
+
namespace dai {
/// Construct from GraphAL
GraphEL( const GraphAL& G ) {
for( size_t n1 = 0; n1 < G.nrNodes(); n1++ )
- foreach( const Neighbor n2, G.nb(n1) )
+ bforeach( const Neighbor n2, G.nb(n1) )
if( n1 < n2 )
insert( UEdge( n1, n2 ) );
}
for( size_t i = 0; i < _fg->nrVars(); i++ ) {
_indices[i].clear();
_indices[i].reserve( _fg->nbV(i).size() );
- foreach( const Neighbor &I, _fg->nbV(i) ) {
+ bforeach( const Neighbor &I, _fg->nbV(i) ) {
_ind_t index;
index.reserve( _fg->factor(I).nrStates() );
for( IndexFor k(_fg->var(i), _fg->factor(I).vars()); k.valid(); ++k )
_Tmsg.resize( _fg->nrVars() );
for( size_t i = 0; i < _fg->nrVars(); i++ ) {
_Tmsg[i].resize( _fg->nbV(i).size() );
- foreach( const Neighbor &I, _fg->nbV(i) ) {
+ bforeach( const Neighbor &I, _fg->nbV(i) ) {
Prob prod( _fg->var(i).states(), 1.0 );
- foreach( const Neighbor &J, _fg->nbV(i) )
+ bforeach( const Neighbor &J, _fg->nbV(i) )
if( J.node != I.node )
prod *= _bp_dual.msgM( i, J.iter );
_Tmsg[i][I.iter] = prod;
_Umsg.resize( _fg->nrFactors() );
for( size_t I = 0; I < _fg->nrFactors(); I++ ) {
_Umsg[I].resize( _fg->nbF(I).size() );
- foreach( const Neighbor &i, _fg->nbF(I) ) {
+ bforeach( const Neighbor &i, _fg->nbF(I) ) {
Prob prod( _fg->factor(I).nrStates(), 1.0 );
- foreach( const Neighbor &j, _fg->nbF(I) )
+ bforeach( const Neighbor &j, _fg->nbF(I) )
if( i.node != j.node ) {
Prob n_jI( _bp_dual.msgN( j, j.dual ) );
const _ind_t &ind = _index( j, j.dual );
_Smsg.resize( _fg->nrVars() );
for( size_t i = 0; i < _fg->nrVars(); i++ ) {
_Smsg[i].resize( _fg->nbV(i).size() );
- foreach( const Neighbor &I, _fg->nbV(i) ) {
+ bforeach( const Neighbor &I, _fg->nbV(i) ) {
_Smsg[i][I.iter].resize( _fg->nbF(I).size() );
- foreach( const Neighbor &j, _fg->nbF(I) )
+ bforeach( const Neighbor &j, _fg->nbF(I) )
if( i != j ) {
Factor prod( _fg->factor(I) );
- foreach( const Neighbor &k, _fg->nbF(I) ) {
+ bforeach( const Neighbor &k, _fg->nbF(I) ) {
if( k != i && k.node != j.node ) {
const _ind_t &ind = _index( k, k.dual );
Prob p( _bp_dual.msgN( k, k.dual ) );
_Rmsg.resize( _fg->nrFactors() );
for( size_t I = 0; I < _fg->nrFactors(); I++ ) {
_Rmsg[I].resize( _fg->nbF(I).size() );
- foreach( const Neighbor &i, _fg->nbF(I) ) {
+ bforeach( const Neighbor &i, _fg->nbF(I) ) {
_Rmsg[I][i.iter].resize( _fg->nbV(i).size() );
- foreach( const Neighbor &J, _fg->nbV(i) ) {
+ bforeach( const Neighbor &J, _fg->nbV(i) ) {
if( I != J ) {
Prob prod( _fg->var(i).states(), 1.0 );
- foreach( const Neighbor &K, _fg->nbV(i) )
+ bforeach( const Neighbor &K, _fg->nbV(i) )
if( K.node != I && K.node != J.node )
prod *= _bp_dual.msgM( i, K.iter );
_Rmsg[I][i.iter][J.iter] = prod;
for( size_t i = 0; i < _fg->nrVars(); i++ ) {
Prob p( _adj_b_V_unnorm[i] );
DAI_ASSERT( p.size() == _fg->var(i).states() );
- foreach( const Neighbor &I, _fg->nbV(i) )
+ bforeach( const Neighbor &I, _fg->nbV(i) )
p *= _bp_dual.msgM( i, I.iter );
p += _init_adj_psi_V[i];
_adj_psi_V.push_back( p );
for( size_t I = 0; I < _fg->nrFactors(); I++ ) {
Prob p( _adj_b_F_unnorm[I] );
DAI_ASSERT( p.size() == _fg->factor(I).nrStates() );
- foreach( const Neighbor &i, _fg->nbF(I) ) {
+ bforeach( const Neighbor &i, _fg->nbF(I) ) {
Prob n_iI( _bp_dual.msgN( i, i.dual ) );
const _ind_t& ind = _index( i, i.dual );
// multiply prod with n_jI
_adj_m_unnorm[i].resize( n_i );
_new_adj_n[i].resize( n_i );
_new_adj_m[i].resize( n_i );
- foreach( const Neighbor &I, _fg->nbV(i) ) {
+ bforeach( const Neighbor &I, _fg->nbV(i) ) {
{ // calculate adj_n
Prob prod( _fg->factor(I).p() );
prod *= _adj_b_F_unnorm[I];
- foreach( const Neighbor &j, _fg->nbF(I) )
+ bforeach( const Neighbor &j, _fg->nbF(I) )
if( i != j ) {
Prob n_jI( _bp_dual.msgN( j, j.dual ) );
const _ind_t &ind = _index( j, j.dual );
{ // calculate adj_m
Prob prod( _adj_b_V_unnorm[i] );
DAI_ASSERT( prod.size() == _fg->var(i).states() );
- foreach( const Neighbor &J, _fg->nbV(i) )
+ bforeach( const Neighbor &J, _fg->nbV(i) )
if( J.node != I.node )
prod *= _bp_dual.msgM(i,J.iter);
_new_adj_m[i][I.iter] = prod;
_adj_m[i].resize( n_i );
_adj_m_unnorm[i].resize( n_i );
_new_adj_m[i].resize( n_i );
- foreach( const Neighbor &I, _fg->nbV(i) ) {
+ bforeach( const Neighbor &I, _fg->nbV(i) ) {
// calculate adj_m
Prob prod( _adj_b_V_unnorm[i] );
DAI_ASSERT( prod.size() == _fg->var(i).states() );
- foreach( const Neighbor &J, _fg->nbV(i) )
+ bforeach( const Neighbor &J, _fg->nbV(i) )
if( J.node != I.node )
prod *= _bp_dual.msgM( i, J.iter );
_adj_m[i][I.iter] = prod;
}
}
for( size_t i = 0; i < _fg->nrVars(); i++ ) {
- foreach( const Neighbor &I, _fg->nbV(i) ) {
+ bforeach( const Neighbor &I, _fg->nbV(i) ) {
// calculate adj_n
Prob prod( _fg->factor(I).p() );
prod *= _adj_b_F_unnorm[I];
- foreach( const Neighbor &j, _fg->nbF(I) )
+ bforeach( const Neighbor &j, _fg->nbF(I) )
if( i != j ) {
Prob n_jI( _bp_dual.msgN( j, j.dual) );
const _ind_t& ind = _index( j, j.dual );
Prob &new_adj_n_iI = _new_adj_n[i][_I];
new_adj_n_iI = Prob( _fg->var(i).states(), 0.0 );
size_t I = _fg->nbV(i)[_I];
- foreach( const Neighbor &j, _fg->nbF(I) )
+ bforeach( const Neighbor &j, _fg->nbF(I) )
if( j != i ) {
const Prob &p = _Smsg[i][_I][j.iter];
const Prob &_adj_m_unnorm_jI = _adj_m_unnorm[j][j.dual];
*/
_new_adj_m[i][_I] = Prob( _fg->var(i).states(), 0.0 );
- foreach( const Neighbor &J, _fg->nbV(i) )
+ bforeach( const Neighbor &J, _fg->nbV(i) )
if( J != I )
_new_adj_m[i][_I] += _Rmsg[I][I.dual][J.iter] * _adj_n_unnorm[i][J.iter];
}
void BBP::doParUpdate() {
for( size_t i = 0; i < _fg->nrVars(); i++ )
- foreach( const Neighbor &I, _fg->nbV(i) ) {
+ bforeach( const Neighbor &I, _fg->nbV(i) ) {
calcNewM( i, I.iter );
calcNewN( i, I.iter );
}
for( size_t i = 0; i < _fg->nrVars(); i++ )
- foreach( const Neighbor &I, _fg->nbV(i) ) {
+ bforeach( const Neighbor &I, _fg->nbV(i) ) {
upMsgM( i, I.iter );
upMsgN( i, I.iter );
}
DAI_PV(_fg->factor(I).p());
}
#endif
- foreach( const Neighbor &J, _fg->nbV(i) ) {
+ bforeach( const Neighbor &J, _fg->nbV(i) ) {
if( J.node != I.node ) {
#if 0
if(f_unnorm.sumAbs() > pv_thresh) {
// DAI_PV(I);
// DAI_PV(_I);
// DAI_PV(_fg->nbF(I).size());
- foreach( const Neighbor &i, _fg->nbF(I) ) {
+ bforeach( const Neighbor &i, _fg->nbF(I) ) {
if( i.node != j ) {
const Prob &S = _Smsg[i][i.dual][_j];
Prob msg( _fg->var(i).states(), 0.0 );
Real s = 0.0;
size_t e = 0;
for( size_t i = 0; i < _fg->nrVars(); i++ )
- foreach( const Neighbor &I, _fg->nbV(i) ) {
+ bforeach( const Neighbor &I, _fg->nbV(i) ) {
s += _adj_m_unnorm[i][I.iter].sumAbs();
s += _adj_n_unnorm[i][I.iter].sumAbs();
e++;
new_s = 0.0;
size_t e = 0;
for( size_t i = 0; i < _fg->nrVars(); i++ )
- foreach( const Neighbor &I, _fg->nbV(i) ) {
+ bforeach( const Neighbor &I, _fg->nbV(i) ) {
s += _adj_m[i][I.iter].sumAbs();
s += _adj_n[i][I.iter].sumAbs();
new_s += _new_adj_m[i][I.iter].sumAbs();
void BBP::getArgmaxMsgM( size_t &out_i, size_t &out__I, Real &mag ) {
bool found = false;
for( size_t i = 0; i < _fg->nrVars(); i++ )
- foreach( const Neighbor &I, _fg->nbV(i) ) {
+ bforeach( const Neighbor &I, _fg->nbV(i) ) {
Real thisMag = _adj_m[i][I.iter].sumAbs();
if( !found || mag < thisMag ) {
found = true;
Real BBP::getTotalMsgM() {
Real mag = 0.0;
for( size_t i = 0; i < _fg->nrVars(); i++ )
- foreach( const Neighbor &I, _fg->nbV(i) )
+ bforeach( const Neighbor &I, _fg->nbV(i) )
mag += _adj_m[i][I.iter].sumAbs();
return mag;
}
Real BBP::getTotalNewMsgM() {
Real mag = 0.0;
for( size_t i = 0; i < _fg->nrVars(); i++ )
- foreach( const Neighbor &I, _fg->nbV(i) )
+ bforeach( const Neighbor &I, _fg->nbV(i) )
mag += _new_adj_m[i][I.iter].sumAbs();
return mag;
}
Real BBP::getTotalMsgN() {
Real mag = 0.0;
for( size_t i = 0; i < _fg->nrVars(); i++ )
- foreach( const Neighbor &I, _fg->nbV(i) )
+ bforeach( const Neighbor &I, _fg->nbV(i) )
mag += _adj_n[i][I.iter].sumAbs();
return mag;
}
break;
for( size_t i = 0; i < _fg->nrVars(); i++ )
- foreach( const Neighbor &I, _fg->nbV(i) )
+ bforeach( const Neighbor &I, _fg->nbV(i) )
sendSeqMsgM( i, I.iter );
/* for( size_t i = 0; i < _fg->nrVars(); i++ )
- foreach( const Neighbor &I, _fg->nbV(i) )
+ bforeach( const Neighbor &I, _fg->nbV(i) )
updateSeqMsgM( i, I.iter );*/
} while( mag > tol && _iters < props.maxiter );
// for each variable i
for(size_t i=0; i<bp_dual.nrVars(); i++) {
// for each factor I ~ i
- foreach(size_t I, bp_dual.nbV(i)) {
+ bforeach(size_t I, bp_dual.nbV(i)) {
vector<Real> adj_n_est;
// for each value xi
for(size_t xi=0; xi<bp_dual.var(i).states(); xi++) {
bool exists = false;
if( check ) {
// Check whether the edge already exists
- foreach( const Neighbor &nb2, nb1(n1) )
+ bforeach( const Neighbor &nb2, nb1(n1) )
if( nb2 == n2 ) {
exists = true;
break;
SmallSet<size_t> BipartiteGraph::nb1Set( size_t n1 ) const {
SmallSet<size_t> result;
- foreach( const Neighbor &n2, nb1(n1) )
+ bforeach( const Neighbor &n2, nb1(n1) )
result |= n2;
return result;
}
SmallSet<size_t> BipartiteGraph::nb2Set( size_t n2 ) const {
SmallSet<size_t> result;
- foreach( const Neighbor &n1, nb2(n2) )
+ bforeach( const Neighbor &n1, nb2(n2) )
result |= n1;
return result;
}
SmallSet<size_t> BipartiteGraph::delta1( size_t n1, bool include ) const {
// get all second-order neighbors
SmallSet<size_t> result;
- foreach( const Neighbor &n2, nb1(n1) )
- foreach( const Neighbor &m1, nb2(n2) )
+ bforeach( const Neighbor &n2, nb1(n1) )
+ bforeach( const Neighbor &m1, nb2(n2) )
if( include || (m1 != n1) )
result |= m1;
return result;
SmallSet<size_t> BipartiteGraph::delta2( size_t n2, bool include ) const {
// store all second-order neighbors
SmallSet<size_t> result;
- foreach( const Neighbor &n1, nb2(n2) )
- foreach( const Neighbor &m2, nb1(n1) )
+ bforeach( const Neighbor &n1, nb2(n2) )
+ bforeach( const Neighbor &m2, nb1(n1) )
if( include || (m2 != n2) )
result |= m2;
return result;
vector<E> edges;
edges.reserve( nrEdges() );
for( size_t n1 = 0; n1 < nrNodes1(); n1++ )
- foreach( const Neighbor &n2, nb1(n1) )
+ bforeach( const Neighbor &n2, nb1(n1) )
edges.push_back( E( n1, n2.node + N ) );
boostGraph g( edges.begin(), edges.end(), nrNodes1() + nrNodes2() );
// For all nodes of type 2, check if they are connected with the (growing) component
for( size_t n2 = 0; n2 < nrNodes2(); n2++ )
if( !incomponent2[n2] ) {
- foreach( const Neighbor &n1, nb2(n2) ) {
+ bforeach( const Neighbor &n1, nb2(n2) ) {
if( incomponent1[n1] ) {
found_new_nodes = true;
incomponent2[n2] = true;
// For all nodes of type 1, check if they are connected with the (growing) component
for( size_t n1 = 0; n1 < nrNodes1(); n1++ )
if( !incomponent1[n1] ) {
- foreach( const Neighbor &n2, nb1(n1) ) {
+ bforeach( const Neighbor &n2, nb1(n1) ) {
if( incomponent2[n2] ) {
found_new_nodes = true;
incomponent1[n1] = true;
newLevel.ind1 = vector<size_t>( 1, n1 );
// add all neighbors of n1 to ind2
newLevel.ind2.reserve( nb1(n1).size() );
- foreach( const Neighbor &n2, nb1(n1) )
+ bforeach( const Neighbor &n2, nb1(n1) )
newLevel.ind2.push_back( n2 );
} else {
const levelType &prevLevel = levels.back();
// build newLevel.ind1
- foreach( size_t n2, prevLevel.ind2 ) { // for all n2 in the previous level
- foreach( const Neighbor &n1, nb2(n2) ) { // for all neighbors n1 of n2
+ bforeach( size_t n2, prevLevel.ind2 ) { // for all n2 in the previous level
+ bforeach( const Neighbor &n1, nb2(n2) ) { // for all neighbors n1 of n2
if( find( prevLevel.ind1.begin(), prevLevel.ind1.end(), n1 ) == prevLevel.ind1.end() ) { // n1 not in previous level
if( find( newLevel.ind1.begin(), newLevel.ind1.end(), n1 ) != newLevel.ind1.end() )
foundCycle = true; // n1 already in new level: we found a cycle
break;
}
// build newLevel.ind2
- foreach( size_t n1, newLevel.ind1 ) { // for all n1 in this level
- foreach( const Neighbor &n2, nb1(n1) ) { // for all neighbors n2 of n1
+ bforeach( size_t n1, newLevel.ind1 ) { // for all n1 in this level
+ bforeach( const Neighbor &n2, nb1(n1) ) { // for all neighbors n2 of n1
if( find( prevLevel.ind2.begin(), prevLevel.ind2.end(), n2 ) == prevLevel.ind2.end() ) { // n2 not in previous level
if( find( newLevel.ind2.begin(), newLevel.ind2.end(), n2 ) != newLevel.ind2.end() )
foundCycle = true; // n2 already in new level: we found a cycle
for( size_t n2 = 0; n2 < nrNodes2(); n2++ )
os << "\ty" << n2 << ";" << endl;
for( size_t n1 = 0; n1 < nrNodes1(); n1++ )
- foreach( const Neighbor &n2, nb1(n1) )
+ bforeach( const Neighbor &n2, nb1(n1) )
os << "\tx" << n1 << " -- y" << n2 << ";" << endl;
os << "}" << endl;
}
size_t N2 = nrNodes2();
for( size_t n1 = 0; n1 < N1; n1++ ) {
size_t iter = 0;
- foreach( const Neighbor &n2, nb1(n1) ) {
+ bforeach( const Neighbor &n2, nb1(n1) ) {
DAI_ASSERT( n2.iter == iter );
DAI_ASSERT( n2.node < N2 );
DAI_ASSERT( n2.dual < nb2(n2).size() );
}
for( size_t n2 = 0; n2 < N2; n2++ ) {
size_t iter = 0;
- foreach( const Neighbor &n1, nb2(n2) ) {
+ bforeach( const Neighbor &n1, nb2(n2) ) {
DAI_ASSERT( n1.iter == iter );
DAI_ASSERT( n1.node < N1 );
DAI_ASSERT( n1.dual < nb1(n1).size() );
_edge2lut.push_back( vector<LutType::iterator>() );
_edge2lut[i].reserve( nbV(i).size() );
}
- foreach( const Neighbor &I, nbV(i) ) {
+ bforeach( const Neighbor &I, nbV(i) ) {
EdgeProp newEP;
newEP.message = Prob( var(i).states() );
newEP.newMessage = Prob( var(i).states() );
_updateSeq.clear();
_updateSeq.reserve( nrEdges() );
for( size_t I = 0; I < nrFactors(); I++ )
- foreach( const Neighbor &i, nbF(I) )
+ bforeach( const Neighbor &i, nbF(I) )
_updateSeq.push_back( Edge( i, i.dual ) );
}
void BP::init() {
Real c = props.logdomain ? 0.0 : 1.0;
for( size_t i = 0; i < nrVars(); ++i ) {
- foreach( const Neighbor &I, nbV(i) ) {
+ bforeach( const Neighbor &I, nbV(i) ) {
message( i, I.iter ).fill( c );
newMessage( i, I.iter ).fill( c );
if( props.updates == Properties::UpdateType::SEQMAX )
prod.takeLog();
// Calculate product of incoming messages and factor I
- foreach( const Neighbor &j, nbF(I) )
+ bforeach( const Neighbor &j, nbF(I) )
if( !(without_i && (j == i)) ) {
// prod_j will be the product of messages coming into j
Prob prod_j( var(j).states(), props.logdomain ? 0.0 : 1.0 );
- foreach( const Neighbor &J, nbV(j) )
+ bforeach( const Neighbor &J, nbV(j) )
if( J != I ) { // for all J in nb(j) \ I
if( props.logdomain )
prod_j += message( j, J.iter );
if( _iters == 0 ) {
// do the first pass
for( size_t i = 0; i < nrVars(); ++i )
- foreach( const Neighbor &I, nbV(i) )
+ bforeach( const Neighbor &I, nbV(i) )
calcNewMessage( i, I.iter );
}
// Maximum-Residual BP [\ref EMK06]
// I->i has been updated, which means that residuals for all
// J->j with J in nb[i]\I and j in nb[J]\i have to be updated
- foreach( const Neighbor &J, nbV(i) ) {
+ bforeach( const Neighbor &J, nbV(i) ) {
if( J.iter != _I ) {
- foreach( const Neighbor &j, nbF(J) ) {
+ bforeach( const Neighbor &j, nbF(J) ) {
size_t _J = j.dual;
if( j != i )
calcNewMessage( j, _J );
} else if( props.updates == Properties::UpdateType::PARALL ) {
// Parallel updates
for( size_t i = 0; i < nrVars(); ++i )
- foreach( const Neighbor &I, nbV(i) )
+ bforeach( const Neighbor &I, nbV(i) )
calcNewMessage( i, I.iter );
for( size_t i = 0; i < nrVars(); ++i )
- foreach( const Neighbor &I, nbV(i) )
+ bforeach( const Neighbor &I, nbV(i) )
updateMessage( i, I.iter );
} else {
// Sequential updates
if( props.updates == Properties::UpdateType::SEQRND )
random_shuffle( _updateSeq.begin(), _updateSeq.end(), rnd );
- foreach( const Edge &e, _updateSeq ) {
+ bforeach( const Edge &e, _updateSeq ) {
calcNewMessage( e.first, e.second );
updateMessage( e.first, e.second );
}
void BP::calcBeliefV( size_t i, Prob &p ) const {
p = Prob( var(i).states(), props.logdomain ? 0.0 : 1.0 );
- foreach( const Neighbor &I, nbV(i) )
+ bforeach( const Neighbor &I, nbV(i) )
if( props.logdomain )
p += newMessage( i, I.iter );
else
void BP::init( const VarSet &ns ) {
for( VarSet::const_iterator n = ns.begin(); n != ns.end(); ++n ) {
size_t ni = findVar( *n );
- foreach( const Neighbor &I, nbV( ni ) ) {
+ bforeach( const Neighbor &I, nbV( ni ) ) {
Real val = props.logdomain ? 0.0 : 1.0;
message( ni, I.iter ).fill( val );
newMessage( ni, I.iter ).fill( val );
// calculate 'n' messages from "factor marginal / factor"
for( size_t I = 0; I < fg().nrFactors(); I++ ) {
Factor f = _ia->beliefF(I) / fg().factor(I);
- foreach( const Neighbor &i, fg().nbF(I) )
+ bforeach( const Neighbor &i, fg().nbF(I) )
msgN(i, i.dual) = f.marginal( fg().var(i) ).p();
}
// calculate 'm' messages and normalizers from 'n' messages
for( size_t i = 0; i < fg().nrVars(); i++ )
- foreach( const Neighbor &I, fg().nbV(i) )
+ bforeach( const Neighbor &I, fg().nbV(i) )
calcNewM( i, I.iter );
// recalculate 'n' messages and normalizers from 'm' messages
for( size_t i = 0; i < fg().nrVars(); i++ )
- foreach( const Neighbor &I, fg().nbV(i) )
+ bforeach( const Neighbor &I, fg().nbV(i) )
calcNewN(i, I.iter);
}
// calculate updated message I->i
const Neighbor &I = fg().nbV(i)[_I];
Prob prod( fg().factor(I).p() );
- foreach( const Neighbor &j, fg().nbF(I) )
+ bforeach( const Neighbor &j, fg().nbF(I) )
if( j != i ) { // for all j in I \ i
Prob &n = msgN(j,j.dual);
IndexFor ind( fg().var(j), fg().factor(I).vars() );
// calculate updated message i->I
const Neighbor &I = fg().nbV(i)[_I];
Prob prod( fg().var(i).states(), 1.0 );
- foreach( const Neighbor &J, fg().nbV(i) )
+ bforeach( const Neighbor &J, fg().nbV(i) )
if( J.node != I.node ) // for all J in i \ I
prod *= msgM(i,J.iter);
_msgs.Zn[i][_I] = prod.normalize();
void BP_dual::calcBeliefV( size_t i ) {
Prob prod( fg().var(i).states(), 1.0 );
- foreach( const Neighbor &I, fg().nbV(i) )
+ bforeach( const Neighbor &I, fg().nbV(i) )
prod *= msgM(i,I.iter);
_beliefs.Zb1[i] = prod.normalize();
_beliefs.b1[i] = prod;
void BP_dual::calcBeliefF( size_t I ) {
Prob prod( fg().factor(I).p() );
- foreach( const Neighbor &j, fg().nbF(I) ) {
+ bforeach( const Neighbor &j, fg().nbF(I) ) {
IndexFor ind( fg().var(j), fg().factor(I).vars() );
Prob n( msgN(j,j.dual) );
for( size_t x = 0; ind.valid(); x++, ++ind )
*_clamp_ofstream << choose_count << "\t" << clamped_vars_list.size() << "\t" << i << "\t" << xis[0] << endl;
if( clampingVar )
- foreach( size_t xi, xis )
+ bforeach( size_t xi, xis )
DAI_ASSERT(/*0<=xi &&*/ xi < var(i).states() );
else
- foreach( size_t xI, xis )
+ bforeach( size_t xI, xis )
DAI_ASSERT(/*0<=xI &&*/ xI < factor(i).nrStates() );
// - otherwise, clamp and recurse, saving margin estimates for each
// clamp setting. afterwards, combine estimates.
ClusterGraph::ClusterGraph( const std::vector<VarSet> & cls ) : _G(), _vars(), _clusters() {
// construct vars, clusters and edge list
vector<Edge> edges;
- foreach( const VarSet &cl, cls ) {
+ bforeach( const VarSet &cl, cls ) {
if( find( clusters().begin(), clusters().end(), cl ) == clusters().end() ) {
// add cluster
size_t n2 = nrClusters();
if( fg.isMaximal( I ) ) {
_clusters.push_back( fg.factor(I).vars() );
size_t clind = _G.addNode2();
- foreach( const Neighbor &i, fg.nbF(I) )
+ bforeach( const Neighbor &i, fg.nbF(I) )
_G.addEdge( i, clind, true );
}
} else {
bool allowed = true;
if( check ) {
// Check whether the edge already exists
- foreach( const Neighbor& n, ch(n1) )
+ bforeach( const Neighbor& n, ch(n1) )
if( n == n2 ) {
allowed = false;
break;
SmallSet<size_t> DAG::paSet( size_t n ) const {
SmallSet<size_t> result;
- foreach( const Neighbor &p, pa(n) )
+ bforeach( const Neighbor &p, pa(n) )
result |= p;
return result;
}
SmallSet<size_t> DAG::chSet( size_t n ) const {
SmallSet<size_t> result;
- foreach( const Neighbor &c, ch(n) )
+ bforeach( const Neighbor &c, ch(n) )
result |= c;
return result;
}
if( node != n )
anc.insert( node );
// Add all parents of node to curParents
- foreach( const Neighbor& p, pa(node) )
+ bforeach( const Neighbor& p, pa(node) )
curParents.insert( p.node );
// Erase node from curParents
curParents.erase( node );
if( node != n )
desc.insert( node );
// Add all children of node to curChildren
- foreach( const Neighbor& c, ch(node) )
+ bforeach( const Neighbor& c, ch(node) )
curChildren.insert( c.node );
// Erase node from curChildren
curChildren.erase( node );
if( node == n2 )
return true;
// Add all children of node to curChildren
- foreach( const Neighbor& c, ch(node) )
+ bforeach( const Neighbor& c, ch(node) )
curChildren.insert( c.node );
// Erase node from curChildren
curChildren.erase( node );
// For all nodes, check if they are connected with the (growing) component
for( size_t n1 = 0; n1 < nrNodes(); n1++ ) {
if( !incomponent[n1] ) {
- foreach( const Neighbor &n2, pa(n1) )
+ bforeach( const Neighbor &n2, pa(n1) )
if( incomponent[n2] ) {
found_new_nodes = true;
incomponent[n1] = true;
}
}
if( !incomponent[n1] ) {
- foreach( const Neighbor &n2, ch(n1) )
+ bforeach( const Neighbor &n2, ch(n1) )
if( incomponent[n2] ) {
found_new_nodes = true;
incomponent[n1] = true;
for( size_t n = 0; n < nrNodes(); n++ )
os << "\tx" << n << ";" << endl;
for( size_t n1 = 0; n1 < nrNodes(); n1++ )
- foreach( const Neighbor &n2, ch(n1) )
+ bforeach( const Neighbor &n2, ch(n1) )
os << "\tx" << n1 << " -> x" << n2 << ";" << endl;
os << "}" << endl;
}
size_t N = nrNodes();
for( size_t n1 = 0; n1 < N; n1++ ) {
size_t iter = 0;
- foreach( const Neighbor &n2, pa(n1) ) {
+ bforeach( const Neighbor &n2, pa(n1) ) {
DAI_ASSERT( n2.iter == iter );
DAI_ASSERT( n2.node < N );
DAI_ASSERT( n2.dual < ch(n2).size() );
iter++;
}
iter = 0;
- foreach( const Neighbor &n2, ch(n1) ) {
+ bforeach( const Neighbor &n2, ch(n1) ) {
DAI_ASSERT( n2.iter == iter );
DAI_ASSERT( n2.node < N );
DAI_ASSERT( n2.dual < pa(n2).size() );
}
// Check acyclicity
for( size_t n1 = 0; n1 < N; n1++ )
- foreach( const Neighbor& n2, ch(n1) )
+ bforeach( const Neighbor& n2, ch(n1) )
DAI_ASSERT( !existsDirectedPath( n2, n1 ) );
}
// First, calculate whether this state is consistent with variables that
// have been assigned already
bool allowedState = true;
- foreach( const Neighbor &j, obj.fg().nbF(I) )
+ bforeach( const Neighbor &j, obj.fg().nbF(I) )
if( visitedVars[j.node] && maximum[j.node] != s(obj.fg().var(j.node)) ) {
allowedState = false;
break;
DAI_ASSERT( obj.fg().factor(I).p()[maxState] != 0.0 );
// Decode the argmax
- foreach( const Neighbor &j, obj.fg().nbF(I) ) {
+ bforeach( const Neighbor &j, obj.fg().nbF(I) ) {
if( visitedVars[j.node] ) {
// We have already visited j earlier - hopefully our state is consistent
if( maximum[j.node] != maxState( obj.fg().var(j.node) ) )
// We found a consistent state for variable j
visitedVars[j.node] = true;
maximum[j.node] = maxState( obj.fg().var(j.node) );
- foreach( const Neighbor &J, obj.fg().nbV(j) )
+ bforeach( const Neighbor &J, obj.fg().nbV(j) )
if( !visitedFactors[J] )
scheduledFactors.push(J);
}
vector<size_t> Istate(1,0);
Istate[0] = clamped->beliefF(bestI).p().argmax().first;
map<Var, size_t> Istatemap = calcState( factor(bestI).vars(), Istate[0] );
- foreach( size_t i, bipGraph().nb2Set(bestI) & freeVars ) {
+ bforeach( size_t i, bipGraph().nb2Set(bestI) & freeVars ) {
varsToInit |= var(i);
varsToClamp |= i;
_state[i] = Istatemap[var(i)];
}
// clamp all variables scheduled for clamping
- foreach( size_t i, varsToClamp )
+ bforeach( size_t i, varsToClamp )
clamped->clamp( i, _state[i], false );
// initialize clamped for the next run
VarSet FactorGraph::Delta( size_t i ) const {
// calculate Markov Blanket
VarSet Del;
- foreach( const Neighbor &I, nbV(i) ) // for all neighboring factors I of i
- foreach( const Neighbor &j, nbF(I) ) // for all neighboring variables j of I
+ bforeach( const Neighbor &I, nbV(i) ) // for all neighboring factors I of i
+ bforeach( const Neighbor &j, nbF(I) ) // for all neighboring variables j of I
Del |= var(j);
return Del;
void FactorGraph::makeCavity( size_t i, bool backup ) {
// fills all Factors that include var(i) with ones
map<size_t,Factor> newFacs;
- foreach( const Neighbor &I, nbV(i) ) // for all neighboring factors I of i
+ bforeach( const Neighbor &I, nbV(i) ) // for all neighboring factors I of i
newFacs[I] = Factor( factor(I).vars(), (Real)1 );
setFactors( newFacs, backup );
}
for( size_t I = 0; I < nrFactors(); I++ )
os << "\tf" << I << ";" << endl;
for( size_t i = 0; i < nrVars(); i++ )
- foreach( const Neighbor &I, nbV(i) ) // for all neighboring factors I of i
+ bforeach( const Neighbor &I, nbV(i) ) // for all neighboring factors I of i
os << "\tv" << var(i).label() << " -- f" << I << ";" << endl;
os << "}" << endl;
}
GraphAL FactorGraph::MarkovGraph() const {
GraphAL G( nrVars() );
for( size_t i = 0; i < nrVars(); i++ )
- foreach( const Neighbor &I, nbV(i) )
- foreach( const Neighbor &j, nbF(I) )
+ bforeach( const Neighbor &I, nbV(i) )
+ bforeach( const Neighbor &j, nbF(I) )
if( i < j )
G.addEdge( i, j, true );
return G;
return false;
return true;
} else {
- foreach( const Neighbor& i, nbF(I) ) {
- foreach( const Neighbor& J, nbV(i) ) {
+ bforeach( const Neighbor& i, nbF(I) ) {
+ bforeach( const Neighbor& J, nbV(i) ) {
if( J != I )
if( (factor(J).vars() >> I_vars) && (factor(J).vars().size() != I_size) )
return false;
return maximalFactor( J );
return I;
} else {
- foreach( const Neighbor& i, nbF(I) ) {
- foreach( const Neighbor& J, nbV(i) ) {
+ bforeach( const Neighbor& i, nbF(I) ) {
+ bforeach( const Neighbor& J, nbV(i) ) {
if( J != I )
if( (factor(J).vars() >> I_vars) && (factor(J).vars().size() != I_size) )
return maximalFactor( J );
mask.set( x, (Real)1 );
map<size_t, Factor> newFacs;
- foreach( const Neighbor &I, nbV(i) )
+ bforeach( const Neighbor &I, nbV(i) )
newFacs[I] = factor(I) * mask;
setFactors( newFacs, backup );
Var n = var(i);
Factor mask_n( n, (Real)0 );
- foreach( size_t i, is ) {
+ bforeach( size_t i, is ) {
DAI_ASSERT( i < n.states() );
mask_n.set( i, (Real)1 );
}
map<size_t, Factor> newFacs;
- foreach( const Neighbor &I, nbV(i) )
+ bforeach( const Neighbor &I, nbV(i) )
newFacs[I] = factor(I) * mask_n;
setFactors( newFacs, backup );
}
size_t st = factor(I).nrStates();
Factor newF( factor(I).vars(), (Real)0 );
- foreach( size_t i, is ) {
+ bforeach( size_t i, is ) {
DAI_ASSERT( i < st );
newF.set( i, factor(I)[i] );
}
}
for( size_t i = 0; i < nrVars(); ++i ) {
Real c_i = 0.0;
- foreach( const Neighbor &I, nbV(i) )
+ bforeach( const Neighbor &I, nbV(i) )
c_i += Weight(I);
if( c_i != 1.0 )
sum += (1.0 - c_i) * beliefV(i).entropy(); // FBP
prod ^= (1.0 / c_I); // FBP
// Calculate product of incoming messages and factor I
- foreach( const Neighbor &j, nbF(I) )
+ bforeach( const Neighbor &j, nbF(I) )
if( !(without_i && (j == i)) ) {
// prod_j will be the product of messages coming into j
// FBP: corresponds to messages n_jI
Prob prod_j( var(j).states(), props.logdomain ? 0.0 : 1.0 );
- foreach( const Neighbor &J, nbV(j) )
+ bforeach( const Neighbor &J, nbV(j) )
if( J != I ) { // for all J in nb(j) \ I
if( props.logdomain )
prod_j += message( j, J.iter );
Prob i_given_MB( i_states, 1.0 );
// use Markov blanket of var(i) to calculate distribution
- foreach( const Neighbor &I, nbV(i) ) {
+ bforeach( const Neighbor &I, nbV(i) ) {
const Factor &f_I = factor(I);
size_t I_skip = getFactorEntryDiff( I, i );
size_t I_entry = getFactorEntry(I) - (_state[i] * I_skip);
bool exists = false;
if( check ) {
// Check whether the edge already exists
- foreach( const Neighbor &n, nb(n1) )
+ bforeach( const Neighbor &n, nb(n1) )
if( n == n2 ) {
exists = true;
break;
SmallSet<size_t> GraphAL::nbSet( size_t n ) const {
SmallSet<size_t> result;
- foreach( const Neighbor &m, nb(n) )
+ bforeach( const Neighbor &m, nb(n) )
result |= m;
return result;
}
// For all nodes, check if they are connected with the (growing) component
for( size_t n1 = 0; n1 < nrNodes(); n1++ )
if( !incomponent[n1] ) {
- foreach( const Neighbor &n2, nb(n1) ) {
+ bforeach( const Neighbor &n2, nb(n1) ) {
if( incomponent[n2] ) {
found_new_nodes = true;
incomponent[n1] = true;
vector<E> edges;
edges.reserve( nrEdges() );
for( size_t n1 = 0; n1 < nrNodes(); n1++ )
- foreach( const Neighbor &n2, nb(n1) )
+ bforeach( const Neighbor &n2, nb(n1) )
if( n1 < n2 )
edges.push_back( E( n1, n2 ) );
boostGraphAL g( edges.begin(), edges.end(), nrNodes() );
// (without backtracking), aborting if a cycle is detected
for( size_t e = 0; e < prevLevel.size(); e++ ) {
size_t n2 = prevLevel[e].first; // for all nodes n2 in the previous level
- foreach( const Neighbor &n1, nb(n2) ) { // for all neighbors n1 of n2
+ bforeach( const Neighbor &n1, nb(n2) ) { // for all neighbors n1 of n2
if( n1 != prevLevel[e].second ) { // no backtracking allowed
for( size_t l = 0; l < levels.size() && !foundCycle; l++ )
for( size_t f = 0; f < levels[l].size() && !foundCycle; f++ )
for( size_t n = 0; n < nrNodes(); n++ )
os << "\tx" << n << ";" << endl;
for( size_t n1 = 0; n1 < nrNodes(); n1++ )
- foreach( const Neighbor &n2, nb(n1) )
+ bforeach( const Neighbor &n2, nb(n1) )
if( n1 < n2 )
os << "\tx" << n1 << " -- x" << n2 << ";" << endl;
os << "}" << endl;
size_t N = nrNodes();
for( size_t n1 = 0; n1 < N; n1++ ) {
size_t iter = 0;
- foreach( const Neighbor &n2, nb(n1) ) {
+ bforeach( const Neighbor &n2, nb(n1) ) {
DAI_ASSERT( n2.iter == iter );
DAI_ASSERT( n2.node < N );
DAI_ASSERT( n2.dual < nb(n2).size() );
_muba.push_back( vector<Factor>() );
_muab[alpha].reserve( nbOR(alpha).size() );
_muba[alpha].reserve( nbOR(alpha).size() );
- foreach( const Neighbor &beta, nbOR(alpha) ) {
+ bforeach( const Neighbor &beta, nbOR(alpha) ) {
_muab[alpha].push_back( Factor( IR(beta) ) );
_muba[alpha].push_back( Factor( IR(beta) ) );
}
_Qb[beta].fill( 1.0 );
else
_Qb[beta].randomize();
- foreach( const Neighbor &alpha, nbIR(beta) ) {
+ bforeach( const Neighbor &alpha, nbIR(beta) ) {
size_t _beta = alpha.dual;
if( props.init == Properties::InitType::UNIFORM ) {
muab( alpha, _beta ).fill( 1.0 );
_Qb[beta].randomize();
for( size_t alpha = 0; alpha < nrORs(); alpha++ )
- foreach( const Neighbor &beta, nbOR(alpha) ) {
+ bforeach( const Neighbor &beta, nbOR(alpha) ) {
size_t _beta = beta.iter;
if( props.init == Properties::InitType::UNIFORM ) {
muab( alpha, _beta ).setUniform();
Real maxDiff = INFINITY;
for( _iters = 0; _iters < props.maxiter && maxDiff > props.tol; _iters++ ) {
for( size_t beta = 0; beta < nrIRs(); beta++ ) {
- foreach( const Neighbor &alpha, nbIR(beta) ) {
+ bforeach( const Neighbor &alpha, nbIR(beta) ) {
size_t _beta = alpha.dual;
muab( alpha, _beta ) = _Qa[alpha].marginal(IR(beta)) / muba(alpha,_beta);
/* TODO: INVESTIGATE THIS PROBLEM
}
Factor Qb_new;
- foreach( const Neighbor &alpha, nbIR(beta) ) {
+ bforeach( const Neighbor &alpha, nbIR(beta) ) {
size_t _beta = alpha.dual;
Qb_new *= muab(alpha,_beta) ^ (1 / (nbIR(beta).size() + IR(beta).c()));
}
else
_Qb[beta] = (Qb_new^(1.0 - props.damping)) * (_Qb[beta]^props.damping);
- foreach( const Neighbor &alpha, nbIR(beta) ) {
+ bforeach( const Neighbor &alpha, nbIR(beta) ) {
size_t _beta = alpha.dual;
muba(alpha,_beta) = _Qb[beta] / muab(alpha,_beta);
*/
Factor Qa_new = OR(alpha);
- foreach( const Neighbor &gamma, nbOR(alpha) )
+ bforeach( const Neighbor &gamma, nbOR(alpha) )
Qa_new *= muba(alpha,gamma.iter);
Qa_new ^= (1.0 / OR(alpha).c());
Qa_new.normalize();
// Calculate new outer regions
for( size_t alpha = 0; alpha < nrORs(); alpha++ ) {
OR(alpha) = org_ORs[alpha];
- foreach( const Neighbor &beta, nbOR(alpha) )
+ bforeach( const Neighbor &beta, nbOR(alpha) )
OR(alpha) *= _Qb[beta] ^ ((IR(beta).c() - org_IR_cs[beta]) / nbIR(beta).size());
}
// Estimate memory needed (rough upper bound)
BigInt memneeded = 0;
- foreach( const VarSet& cl, ElimVec )
+ bforeach( const VarSet& cl, ElimVec )
memneeded += cl.nrStates();
memneeded *= sizeof(Real) * fudge;
if( props.verbose >= 1 ) {
for( size_t alpha = 0; alpha < nrORs(); alpha++ ) {
_mes.push_back( vector<Factor>() );
_mes[alpha].reserve( nbOR(alpha).size() );
- foreach( const Neighbor &beta, nbOR(alpha) )
+ bforeach( const Neighbor &beta, nbOR(alpha) )
_mes[alpha].push_back( Factor( IR(beta), 1.0 ) );
}
size_t _e = nbIR(e)[0].dual;
Factor msg = OR(i);
- foreach( const Neighbor &k, nbOR(i) )
+ bforeach( const Neighbor &k, nbOR(i) )
if( k != e )
msg *= message( i, k.iter );
if( props.inference == Properties::InfType::SUMPROD )
size_t _e = nbIR(e)[1].dual;
Factor msg = OR(i);
- foreach( const Neighbor &k, nbOR(i) )
+ bforeach( const Neighbor &k, nbOR(i) )
if( k != e )
msg *= message( i, k.iter );
if( props.inference == Properties::InfType::SUMPROD )
// Calculate beliefs
for( size_t alpha = 0; alpha < nrORs(); alpha++ ) {
Factor piet = OR(alpha);
- foreach( const Neighbor &k, nbOR(alpha) )
+ bforeach( const Neighbor &k, nbOR(alpha) )
piet *= message( alpha, k.iter );
if( nrIRs() == 0 ) {
_logZ += log( piet.normalize() );
// First, calculate whether this state is consistent with variables that
// have been assigned already
bool allowedState = true;
- foreach( const Var& j, OR(alpha).vars() ) {
+ bforeach( const Var& j, OR(alpha).vars() ) {
size_t j_index = findVar(j);
if( visitedVars[j_index] && maximum[j_index] != s(j) ) {
allowedState = false;
DAI_ASSERT( Qa[alpha][maxState] != 0.0 );
// Decode the argmax
- foreach( const Var& j, OR(alpha).vars() ) {
+ bforeach( const Var& j, OR(alpha).vars() ) {
size_t j_index = findVar(j);
if( visitedVars[j_index] ) {
// We have already visited j earlier - hopefully our state is consistent
// We found a consistent state for variable j
visitedVars[j_index] = true;
maximum[j_index] = maxState( j );
- foreach( const Neighbor &beta, nbOR(alpha) )
- foreach( const Neighbor &alpha2, nbIR(beta) )
+ bforeach( const Neighbor &beta, nbOR(alpha) )
+ bforeach( const Neighbor &alpha2, nbIR(beta) )
if( !visitedORs[alpha2] )
scheduledORs.push(alpha2);
}
for( size_t i = 0; i < nrVars(); i++ ) {
_phis.push_back( vector<Factor>() );
_phis[i].reserve( nbV(i).size() );
- foreach( const Neighbor &I, nbV(i) )
+ bforeach( const Neighbor &I, nbV(i) )
_phis[i].push_back( Factor( factor(I).vars() / var(i) ) );
}
void LC::init() {
for( size_t i = 0; i < nrVars(); ++i )
- foreach( const Neighbor &I, nbV(i) )
+ bforeach( const Neighbor &I, nbV(i) )
if( props.updates == Properties::UpdateType::SEQRND )
_phis[i][I.iter].randomize();
else
for( size_t i = 0; i < nrVars(); i++ ) {
_pancakes[i] = _cavitydists[i];
- foreach( const Neighbor &I, nbV(i) ) {
+ bforeach( const Neighbor &I, nbV(i) ) {
_pancakes[i] *= factor(I);
if( props.updates == Properties::UpdateType::SEQRND )
_pancakes[i] *= _phis[i][I.iter];
vector<Edge> update_seq;
update_seq.reserve( nredges );
for( size_t i = 0; i < nrVars(); ++i )
- foreach( const Neighbor &I, nbV(i) )
+ bforeach( const Neighbor &I, nbV(i) )
update_seq.push_back( Edge( i, I.iter ) );
// do several passes over the network until maximum number of iterations has
Factor MF::calcNewBelief( size_t i ) {
Factor result;
- foreach( const Neighbor &I, nbV(i) ) {
+ bforeach( const Neighbor &I, nbV(i) ) {
Factor belief_I_minus_i;
- foreach( const Neighbor &j, nbF(I) ) // for all j in I \ i
+ bforeach( const Neighbor &j, nbF(I) ) // for all j in I \ i
if( j != i )
belief_I_minus_i *= _beliefs[j];
Factor f_I = factor(I);
random_shuffle( update_seq.begin(), update_seq.end(), rnd );
maxDiff = -INFINITY;
- foreach( const size_t &i, update_seq ) {
+ bforeach( const size_t &i, update_seq ) {
Factor nb = calcNewBelief( i );
if( nb.hasNaNs() ) {
s -= beliefV(i).entropy();
for( size_t I = 0; I < nrFactors(); I++ ) {
Factor henk;
- foreach( const Neighbor &j, nbF(I) ) // for all j in I
+ bforeach( const Neighbor &j, nbF(I) ) // for all j in I
henk *= _beliefs[j];
henk.normalize();
Factor piet;
size_t maxruns = 1000;
for( size_t i = 0; i < G.nrNodes(); i++ )
- foreach( const Neighbor &j, G.nb(i) )
+ bforeach( const Neighbor &j, G.nb(i) )
M[i][j.iter] = 0.1;
size_t run=0;
maxdev=0.0;
run++;
for( size_t i = 0; i < G.nrNodes(); i++ ) {
- foreach( const Neighbor &j, G.nb(i) ) {
+ bforeach( const Neighbor &j, G.nb(i) ) {
size_t _j = j.iter;
size_t _i = G.findNb(j,i);
DAI_ASSERT( G.nb(j,_i) == i );
bpcav.run();
BBP bbp( &bpcav, PropertySet()("verbose",(size_t)0)("tol",(Real)1.0e-9)("maxiter",(size_t)10000)("damping",(Real)0.0)("updates",string("SEQ_MAX")) );
- foreach( const Neighbor &j, G.nb(i) ) {
+ bforeach( const Neighbor &j, G.nb(i) ) {
// Create weights for magnetization of some spin
Prob p( 2, 0.0 );
p.set( 0, -1.0 );
// run BBP to estimate adjoints
bbp.run();
- foreach( const Neighbor &k, G.nb(i) ) {
+ bforeach( const Neighbor &k, G.nb(i) ) {
if( k != j )
cors[i][j.iter][k.iter] = (bbp.adj_psi_V(k)[1] - bbp.adj_psi_V(k)[0]);
else
}
}
for( size_t i = 0; i < N; i++ )
- foreach( const Neighbor &j, G.nb(i) )
+ bforeach( const Neighbor &j, G.nb(i) )
tJ[i][j.iter] = tanh( tJ[i][j.iter] );
// construct M
// Construct outer regions (giving them counting number 1.0)
_ORs.clear();
_ORs.reserve( ors.size() );
- foreach( const VarSet &alpha, ors )
+ bforeach( const VarSet &alpha, ors )
_ORs.push_back( FRegion(Factor(alpha, 1.0), 1.0) );
// For each factor, find an outer region that subsumes that factor.
has_unassigned_ancestor = true;
if( !has_unassigned_ancestor ) {
Real c = 1.0;
- foreach( const Neighbor &alpha, nbIR(beta) )
+ bforeach( const Neighbor &alpha, nbIR(beta) )
c -= OR(alpha).c();
for( vector<size_t>::const_iterator beta2 = ancestors[beta].begin(); beta2 != ancestors[beta].end(); beta2++ )
c -= IR(*beta2).c();
for( size_t beta = 0; beta < rg.nrIRs(); beta++ )
os << "\tb" << beta << " [label=\"b" << beta << ": " << (VarSet)rg.IR(beta) << ", c=" << rg.IR(beta).c() << "\"];" << endl;
for( size_t alpha = 0; alpha < rg.nrORs(); alpha++ )
- foreach( const Neighbor &beta, rg.nbOR(alpha) )
+ bforeach( const Neighbor &beta, rg.nbOR(alpha) )
os << "\ta" << alpha << " -> b" << beta << ";" << endl;
os << "}" << endl;
return os;
for( size_t i = 0; i < fg.nrVars(); i++ ) {
SmallSet<size_t> delta_i = fg.bipGraph().delta1( i, false );
const Var& v_i = fg.var(i);
- foreach( size_t j, delta_i )
+ bforeach( size_t j, delta_i )
if( i < j ) {
const Var& v_j = fg.var(j);
VarSet v_ij( v_i, v_j );
SmallSet<size_t> nb_ij = fg.bipGraph().nb1Set( i ) | fg.bipGraph().nb1Set( j );
Factor piet;
- foreach( size_t I, nb_ij ) {
+ bforeach( size_t I, nb_ij ) {
const VarSet& Ivars = fg.factor(I).vars();
if( props.type == Properties::TypeType::ORG ) {
if( (Ivars == v_i) || (Ivars == v_j) )
}
for( size_t i = 0; i < nrVars(); ++i ) {
Real c_i = 0.0;
- foreach( const Neighbor &I, nbV(i) )
+ bforeach( const Neighbor &I, nbV(i) )
c_i += Weight(I);
if( c_i != 1.0 )
sum += (1.0 - c_i) * beliefV(i).entropy(); // TRWBP/FBP
prod ^= (1.0 / c_I); // TRWBP
// Calculate product of incoming messages and factor I
- foreach( const Neighbor &j, nbF(I) )
+ bforeach( const Neighbor &j, nbF(I) )
if( !(without_i && (j == i)) ) {
const Var &v_j = var(j);
// prod_j will be the product of messages coming into j
// TRWBP: corresponds to messages n_jI
Prob prod_j( v_j.states(), props.logdomain ? 0.0 : 1.0 );
- foreach( const Neighbor &J, nbV(j) ) {
+ bforeach( const Neighbor &J, nbV(j) ) {
Real c_J = Weight(J); // TRWBP
if( J != I ) { // for all J in nb(j) \ I
if( props.logdomain )
// This code has been copied from bp.cpp, except where comments indicate TRWBP-specific behaviour
void TRWBP::calcBeliefV( size_t i, Prob &p ) const {
p = Prob( var(i).states(), props.logdomain ? 0.0 : 1.0 );
- foreach( const Neighbor &I, nbV(i) ) {
+ bforeach( const Neighbor &I, nbV(i) ) {
Real c_I = Weight(I);
if( props.logdomain )
p += newMessage( i, I.iter ) * c_I;
BOOST_CHECK( G.isConnected() );
BOOST_CHECK_EQUAL( G.isTree(), N < 3 );
for( size_t n1 = 0; n1 < G.nrNodes(); n1++ ) {
- foreach( const Neighbor &n2, G.nb(n1) ) {
+ bforeach( const Neighbor &n2, G.nb(n1) ) {
BOOST_CHECK( G.hasEdge( n1, n2 ) );
BOOST_CHECK( G.hasEdge( n2, n1 ) );
}
BOOST_CHECK( G.isConnected() );
BOOST_CHECK_EQUAL( G.isTree(), (N1 <= 1) || (N2 <= 1) );
for( size_t n1 = 0; n1 < G.nrNodes(); n1++ ) {
- foreach( const Neighbor &n2, G.nb(n1) ) {
+ bforeach( const Neighbor &n2, G.nb(n1) ) {
BOOST_CHECK( G.hasEdge( n1, n2 ) );
BOOST_CHECK( G.hasEdge( n2, n1 ) );
}
BOOST_CHECK( G.isConnected() );
BOOST_CHECK_EQUAL( G.isTree(), (G.nrNodes() <= 2) );
for( size_t n1 = 0; n1 < G.nrNodes(); n1++ ) {
- foreach( const Neighbor &n2, G.nb(n1) ) {
+ bforeach( const Neighbor &n2, G.nb(n1) ) {
BOOST_CHECK( G.hasEdge( n1, n2 ) );
BOOST_CHECK( G.hasEdge( n2, n1 ) );
}
BOOST_CHECK( G.isConnected() );
BOOST_CHECK_EQUAL( G.isTree(), (G.nrNodes() == 0) || (N1 <= 1 && N2 <= 1) || (N1 <= 1 && N3 <= 1) || (N2 <= 1 && N3 <= 1) );
for( size_t n1 = 0; n1 < G.nrNodes(); n1++ ) {
- foreach( const Neighbor &n2, G.nb(n1) ) {
+ bforeach( const Neighbor &n2, G.nb(n1) ) {
BOOST_CHECK( G.hasEdge( n1, n2 ) );
BOOST_CHECK( G.hasEdge( n2, n1 ) );
}
BOOST_CHECK( G.isConnected() );
BOOST_CHECK_EQUAL( G.isTree(), (G.nrNodes() <= 2) );
for( size_t n1 = 0; n1 < G.nrNodes(); n1++ ) {
- foreach( const Neighbor &n2, G.nb(n1) ) {
+ bforeach( const Neighbor &n2, G.nb(n1) ) {
BOOST_CHECK( G.hasEdge( n1, n2 ) );
BOOST_CHECK( G.hasEdge( n2, n1 ) );
}
BOOST_CHECK( G.isConnected() );
BOOST_CHECK_EQUAL( G.isTree(), N <= 2 );
for( size_t n1 = 0; n1 < G.nrNodes(); n1++ ) {
- foreach( const Neighbor &n2, G.nb(n1) ) {
+ bforeach( const Neighbor &n2, G.nb(n1) ) {
BOOST_CHECK( G.hasEdge( n1, n2 ) );
BOOST_CHECK( G.hasEdge( n2, n1 ) );
}
BOOST_CHECK( G.isConnected() );
BOOST_CHECK( G.isTree() );
for( size_t n1 = 0; n1 < G.nrNodes(); n1++ ) {
- foreach( const Neighbor &n2, G.nb(n1) ) {
+ bforeach( const Neighbor &n2, G.nb(n1) ) {
BOOST_CHECK( G.hasEdge( n1, n2 ) );
BOOST_CHECK( G.hasEdge( n2, n1 ) );
}
BOOST_CHECK_EQUAL( G.nrEdges(), d * N / 2 );
for( size_t n1 = 0; n1 < G.nrNodes(); n1++ ) {
BOOST_CHECK_EQUAL( G.nb(n1).size(), d );
- foreach( const Neighbor &n2, G.nb(n1) ) {
+ bforeach( const Neighbor &n2, G.nb(n1) ) {
BOOST_CHECK( G.hasEdge( n1, n2 ) );
BOOST_CHECK( G.hasEdge( n2, n1 ) );
}
factors.reserve( G.nrEdges() + N );
// Pairwise factors
for( size_t i = 0; i < N; i++ )
- foreach( const Neighbor &j, G.nb(i) )
+ bforeach( const Neighbor &j, G.nb(i) )
if( i < j ) {
if( ft == FactorType::POTTS )
factors.push_back( createFactorPotts( vars[i], vars[j], beta ) );