Implement BaBSR Branching Heuristic as a new Branching Strategy #851

Merged

25 commits
04068b1
wrote header file for BaBSR Heuristic Divider
liamjdavis Sep 20, 2024
cd327ef
updated divide strategy options to include BaBSR Heuristic
liamjdavis Sep 20, 2024
8104b02
wrote framework for BaBsr Heuristic Divider
liamjdavis Sep 24, 2024
113c13c
built framework for BaBSR Heuristic
liamjdavis Sep 24, 2024
bec2317
built BaBSR Heuristic as ReLU splitting strategy
liamjdavis Oct 5, 2024
0164e01
Merge branch 'NeuralNetworkVerification:master' into liamjdavis/BaBSR…
liamjdavis Oct 8, 2024
533657c
wrote unit test for ReluConstraint computeBaBsr method
liamjdavis Oct 18, 2024
d5d77a6
changed BaBsr logic to pass NLR instance through ReluConstraint inste…
liamjdavis Oct 18, 2024
e0e9962
integrated NLR into unit test
liamjdavis Oct 21, 2024
c8ef584
fixed getBiasForPreviousLayer in NLR, need to write updated unit test
liamjdavis Oct 22, 2024
4b40e6f
Wrote unit tests in new test file Test_BaBsrSplitting.h for BaBSR ReL…
liamjdavis Oct 27, 2024
6a6b3d5
added BaBSR heuristic to command line options
liamjdavis Oct 28, 2024
aa336a6
debugged options parser to properly select babsr heuristic
liamjdavis Oct 28, 2024
fae9fa1
changed previous layer search to use Relu Constraint instead of activ…
liamjdavis Oct 29, 2024
697cbf0
Adjusted BaBSR Branching Strategy to cache all Biases and add candida…
liamjdavis Oct 31, 2024
1c3746f
added score normalization to babsr heuristic
liamjdavis Nov 12, 2024
31e4432
Merge remote-tracking branch 'origin/master' into liamjdavis/BaBSR-he…
liamjdavis Dec 3, 2024
cf2185e
Added new feature to teh CHANGELOG.md
liamjdavis Dec 17, 2024
ee4e713
Improved naming, refactored bias caching from ReluConstraint, and upd…
liamjdavis Jan 10, 2025
a5eccbe
Configured previous bias in ReluConstraint to be cached in double
liamjdavis Jan 10, 2025
abf1db1
Removed unnecessary header files form Test_BaBsrSplitting.h
liamjdavis Jan 12, 2025
6e6d405
Refactor previous bias handling to store them upfront in the NLR
liamjdavis Jan 12, 2025
ce03e4f
Removed BaBSR from SnC strategy parsing in Engine
liamjdavis Jan 17, 2025
fad1a47
Renamed option from 'babsr-heuristic' to 'babsr'
liamjdavis Jan 22, 2025
b157597
Merge branch 'master' into liamjdavis/BaBSR-heuristic
wu-haoze Feb 11, 2025
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -4,6 +4,7 @@
- Adding incremental infrastructure which allows pushing and popping constraints to/from the InputQuery.
- Dropped support for parsing Tensorflow network format. Newest Marabou version that supports Tensorflow is at commit 190555573e4702.
- Fixed bug in the parsing of `transpose` nodes in command line C++ parser.
- Added the BaBSR heuristic as a new branching strategy for ReLU Splitting

## Version 2.0.0

1 change: 1 addition & 0 deletions src/configuration/GlobalConfiguration.cpp
@@ -106,6 +106,7 @@ const unsigned GlobalConfiguration::REFACTORIZATION_THRESHOLD = 100;
const GlobalConfiguration::BasisFactorizationType GlobalConfiguration::BASIS_FACTORIZATION_TYPE =
GlobalConfiguration::SPARSE_FORREST_TOMLIN_FACTORIZATION;

const unsigned GlobalConfiguration::BABSR_CANDIDATES_THRESHOLD = 5;
const unsigned GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD = 5;

const unsigned GlobalConfiguration::DNC_DEPTH_THRESHOLD = 5;
5 changes: 5 additions & 0 deletions src/configuration/GlobalConfiguration.h
@@ -228,6 +228,11 @@ class GlobalConfiguration
};
static const BasisFactorizationType BASIS_FACTORIZATION_TYPE;

/* In the BaBSR-based branching heuristics, only this many earliest nodes are considered to
branch on.
*/
static const unsigned BABSR_CANDIDATES_THRESHOLD;

/* In the polarity-based branching heuristics, only this many earliest nodes
are considered to branch on.
*/
2 changes: 1 addition & 1 deletion src/configuration/OptionParser.cpp
@@ -152,7 +152,7 @@ void OptionParser::initialize()
&( ( *_stringOptions )[Options::SPLITTING_STRATEGY] ) )
->default_value( ( *_stringOptions )[Options::SPLITTING_STRATEGY] ),
"The branching strategy "
"(earliest-relu/pseudo-impact/largest-interval/relu-violation/polarity)."
"(earliest-relu/pseudo-impact/largest-interval/relu-violation/polarity/babsr)."
" pseudo-impact is specific to the DeepSoI (default) procedure and relu-violation is "
"specific to the Reluplex procedure.\n" )(
"soi-split-threshold",
2 changes: 2 additions & 0 deletions src/configuration/Options.cpp
@@ -165,6 +165,8 @@ DivideStrategy Options::getDivideStrategy() const
return DivideStrategy::LargestInterval;
else if ( strategyString == "pseudo-impact" )
return DivideStrategy::PseudoImpact;
else if ( strategyString == "babsr" )
return DivideStrategy::BaBSR;
else
return DivideStrategy::Auto;
}
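With this parsing in place, passing the string `babsr` through Marabou's branching-strategy option (presumably the `--branch` flag whose help text appears in OptionParser.cpp above; the flag name itself is outside this hunk) selects the new heuristic, while any unrecognized string still falls back to DivideStrategy::Auto.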
1 change: 1 addition & 0 deletions src/engine/CMakeLists.txt
@@ -47,6 +47,7 @@ engine_add_unit_test(SoftmaxConstraint)
engine_add_unit_test(SmtCore)
engine_add_unit_test(SumOfInfeasibilitiesManager)
engine_add_unit_test(Tableau)
engine_add_unit_test(BaBsrSplitting)

if (${BUILD_PYTHON})
target_include_directories(${MARABOU_PY} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}")
3 changes: 2 additions & 1 deletion src/engine/DivideStrategy.h
@@ -19,12 +19,13 @@
enum class DivideStrategy {
// Relu splitting
Polarity = 0, // Pick the ReLU with the polarity closest to 0 among the first K nodes
BaBSR, // Pick the ReLU with the highest BaBSR score
EarliestReLU, // Pick a ReLU that appears in the earliest layer
ReLUViolation, // Pick the ReLU that has been violated for the most times
LargestInterval, // Pick the largest interval every K split steps, use ReLUViolation in other
// steps
PseudoImpact, // The pseudo-impact heuristic associated with SoI.
Auto, // See decideBranchingHeursitics() in Engine.h
Auto, // See decideBranchingHeuristics() in Engine.h
};

#endif // __DivideStrategy_h__
73 changes: 73 additions & 0 deletions src/engine/Engine.cpp
@@ -2704,6 +2704,77 @@ void Engine::decideBranchingHeuristics()
_smtCore.initializeScoreTrackerIfNeeded( _plConstraints );
}

PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnBaBsrHeuristic()
{
ENGINE_LOG( Stringf( "Using BaBsr heuristic with normalized scores..." ).ascii() );

if ( !_networkLevelReasoner )
return NULL;

// Get constraints from NLR
List<PiecewiseLinearConstraint *> constraints =
_networkLevelReasoner->getConstraintsInTopologicalOrder();

// Temporary vector to store raw scores for normalization
Vector<double> rawScores;
Map<double, PiecewiseLinearConstraint *> scoreToConstraint;

// Filter for ReLU constraints
for ( auto &plConstraint : constraints )
{
if ( plConstraint->supportBaBsr() && plConstraint->isActive() &&
!plConstraint->phaseFixed() )
{
ReluConstraint *reluConstraint = dynamic_cast<ReluConstraint *>( plConstraint );
if ( reluConstraint )
{
// Set NLR if not already set
reluConstraint->initializeNLRForBaBSR( _networkLevelReasoner );

// Calculate heuristic score - bias calculation happens inside computeBaBsr
plConstraint->updateScoreBasedOnBaBsr();

// Collect raw scores
rawScores.append( plConstraint->getScore() );
}
}
}

// Normalize scores
if ( !rawScores.empty() )
{
double minScore = *std::min_element( rawScores.begin(), rawScores.end() );
double maxScore = *std::max_element( rawScores.begin(), rawScores.end() );
double range = maxScore - minScore;

for ( auto &plConstraint : constraints )
{
if ( plConstraint->supportBaBsr() && plConstraint->isActive() &&
!plConstraint->phaseFixed() )
{
double rawScore = plConstraint->getScore();
double normalizedScore = range > 0 ? ( rawScore - minScore ) / range : 0;

// Store normalized score in the map
scoreToConstraint[normalizedScore] = plConstraint;
if ( scoreToConstraint.size() >= GlobalConfiguration::BABSR_CANDIDATES_THRESHOLD )
break;
}
}
}

// Split on neuron or constraint with highest normalized score
if ( !scoreToConstraint.empty() )
{
ENGINE_LOG( Stringf( "Normalized score of the picked ReLU: %f",
( *scoreToConstraint.begin() ).first )
.ascii() );
return ( *scoreToConstraint.begin() ).second;
}
else
return NULL;
}

PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnPolarity()
{
ENGINE_LOG( Stringf( "Using Polarity-based heuristics..." ).ascii() );
@@ -2815,6 +2886,8 @@ PiecewiseLinearConstraint *Engine::pickSplitPLConstraint( DivideStrategy strateg
candidatePLConstraint = _smtCore.getConstraintsWithHighestScore();
}
}
else if ( strategy == DivideStrategy::BaBSR )
candidatePLConstraint = pickSplitPLConstraintBasedOnBaBsrHeuristic();
else if ( strategy == DivideStrategy::Polarity )
candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity();
else if ( strategy == DivideStrategy::EarliestReLU )
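The new picker above is a two-pass scheme: compute raw BaBSR scores for the eligible ReLUs, min-max normalize them into [0, 1], and keep at most BABSR_CANDIDATES_THRESHOLD candidates keyed by normalized score. A minimal standalone sketch of the same scheme, using plain STL containers and hypothetical candidate names and scores rather than Marabou's Map and constraint types:

#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

int main()
{
    // Hypothetical raw BaBSR scores for four ReLU candidates.
    std::vector<std::pair<std::string, double>> raw = {
        { "relu_0", 0.8 }, { "relu_1", 3.5 }, { "relu_2", 1.2 }, { "relu_3", 2.9 } };

    const unsigned candidatesThreshold = 5; // stands in for BABSR_CANDIDATES_THRESHOLD

    // Pass 1: find the raw score range.
    double minScore = raw[0].second;
    double maxScore = raw[0].second;
    for ( const auto &entry : raw )
    {
        minScore = std::min( minScore, entry.second );
        maxScore = std::max( maxScore, entry.second );
    }
    double range = maxScore - minScore;

    // Pass 2: key candidates by normalized score, capped at the threshold.
    // Like the Map in the diff, std::map keeps one entry per key, so
    // candidates with identical normalized scores collapse into one slot.
    std::map<double, std::string> scoreToCandidate;
    for ( const auto &entry : raw )
    {
        double normalized = range > 0 ? ( entry.second - minScore ) / range : 0;
        scoreToCandidate[normalized] = entry.first;
        if ( scoreToCandidate.size() >= candidatesThreshold )
            break;
    }

    // std::map iterates in ascending key order, so rbegin() holds the
    // highest-scoring candidate and begin() the lowest.
    std::cout << "picked " << scoreToCandidate.rbegin()->second << std::endl;
    return 0;
}

One ordering caveat: a standard ascending map puts the smallest key at begin(), so the diff's return of ( *scoreToConstraint.begin() ).second matches its "highest normalized score" comment only if Marabou's Map iterates in descending key order.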
5 changes: 5 additions & 0 deletions src/engine/Engine.h
@@ -751,6 +751,11 @@ class Engine
*/
void decideBranchingHeuristics();

/*
Pick the ReLU with the highest BaBSR heuristic score.
*/
PiecewiseLinearConstraint *pickSplitPLConstraintBasedOnBaBsrHeuristic();

/*
Among the earliest K ReLUs, pick the one with Polarity closest to 0.
K is equal to GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD
11 changes: 11 additions & 0 deletions src/engine/PiecewiseLinearConstraint.h
@@ -315,6 +315,14 @@ class PiecewiseLinearConstraint : public ITableau::VariableWatcher
return false;
}

/*
Return true if and only if this piecewise linear constraint supports the BaBsr Heuristic
*/
virtual bool supportBaBsr() const
{
return false;
}

/*
Update the preferred direction to take first when splitting on this PLConstraint
*/
@@ -327,6 +335,9 @@
return _score;
}

virtual void updateScoreBasedOnBaBsr()
{
}

virtual void updateScoreBasedOnPolarity()
{
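These base-class defaults make BaBSR strictly opt-in: supportBaBsr() reports false and updateScoreBasedOnBaBsr() is a no-op unless a constraint type overrides both, as ReluConstraint does in the next two files.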
35 changes: 35 additions & 0 deletions src/engine/ReluConstraint.cpp
@@ -1019,6 +1019,11 @@ bool ReluConstraint::supportPolarity() const
return true;
}

bool ReluConstraint::supportBaBsr() const
{
return true;
}

bool ReluConstraint::auxVariableInUse() const
{
return _auxVarInUse;
@@ -1029,6 +1034,31 @@ unsigned ReluConstraint::getAux() const
return _aux;
}


double ReluConstraint::computeBaBsr() const
{
if ( !_networkLevelReasoner )
throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_NOT_AVAILABLE );

double biasTerm = _networkLevelReasoner->getPreviousBias( this );

// get upper and lower bounds
double ub = getUpperBound( _b );
double lb = getLowerBound( _b );

// cast _b and _f to doubles
double reluInput = _tableau->getValue( _b ); // ReLU input before activation
double reluOutput = _tableau->getValue( _f ); // ReLU output after activation

// compute ReLU score
double scaler = ub / ( ub - lb );
double term1 =
std::min( scaler * reluInput * biasTerm, ( scaler - 1.0 ) * reluInput * biasTerm );
double term2 = ( scaler * lb ) * reluOutput;

return term1 - term2;
}

double ReluConstraint::computePolarity() const
{
double currentLb = getLowerBound( _b );
@@ -1052,6 +1082,11 @@ PhaseStatus ReluConstraint::getDirection() const
return _direction;
}

void ReluConstraint::updateScoreBasedOnBaBsr()
{
_score = std::abs( computeBaBsr() );
}

void ReluConstraint::updateScoreBasedOnPolarity()
{
_score = std::abs( computePolarity() );
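Restating computeBaBsr() above in math, with u and l the upper and lower bounds on the pre-activation variable b, x-hat and y-hat the current tableau values of b and f, and b-hat the previous-layer bias fetched from the NLR:

\alpha = \frac{u}{u - l}, \qquad
\mathrm{score} = \min\!\bigl( \alpha \hat{x} \hat{b},\; (\alpha - 1)\,\hat{x} \hat{b} \bigr) \;-\; \alpha\, l\, \hat{y}

updateScoreBasedOnBaBsr() then stores |score|. Intuitively, alpha is the slope of the upper line of the ReLU's triangle relaxation, so the score roughly estimates how much the relaxation would improve if this neuron were split into its two linear phases; neurons with the largest absolute estimate are branched on first.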
29 changes: 29 additions & 0 deletions src/engine/ReluConstraint.h
@@ -37,6 +37,15 @@

#include <cmath>

/*
Namespace for Network Level Reasoner Instance.
*/
namespace NLR {
class NetworkLevelReasoner;
class Layer;
struct NeuronIndex;
} // namespace NLR

class ReluConstraint : public PiecewiseLinearConstraint
{
public:
@@ -57,6 +66,15 @@ class ReluConstraint : public PiecewiseLinearConstraint
*/
PiecewiseLinearConstraint *duplicateConstraint() const override;

/*
Initialize the network level reasoner needed for BaBSR branching heuristics.
*/

void initializeNLRForBaBSR( NLR::NetworkLevelReasoner *nlr )
{
_networkLevelReasoner = nlr;
}

/*
Restore the state of this constraint from the given one.
*/
@@ -215,6 +233,10 @@

bool supportPolarity() const override;

bool supportBaBsr() const override;

double computeBaBsr() const;

/*
Return the polarity of this ReLU, which computes how symmetric
the bound of the input to this ReLU is with respect to 0.
Expand All @@ -227,6 +249,7 @@ class ReluConstraint : public PiecewiseLinearConstraint
always between -1 and 1. The closer it is to 0, the more symmetric the
bound is.
*/

double computePolarity() const;

/*
@@ -237,12 +260,18 @@

PhaseStatus getDirection() const;

/*
Update the score based on the BaBsr heuristic
*/
void updateScoreBasedOnBaBsr() override;

void updateScoreBasedOnPolarity() override;

const List<unsigned> getNativeAuxVars() const override;

private:
unsigned _b, _f;
NLR::NetworkLevelReasoner *_networkLevelReasoner;
bool _auxVarInUse;
unsigned _aux;

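A note on the forward declarations at the top of this header: declaring NLR::NetworkLevelReasoner, Layer, and NeuronIndex instead of including the NLR header keeps ReluConstraint.h light and sidesteps a potential circular include, since the network-level reasoner itself works with piecewise-linear constraints; the stored _networkLevelReasoner pointer only needs the full class definition inside ReluConstraint.cpp.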