diff --git a/CHANGELOG.md b/CHANGELOG.md
index 68cb7719e..412c3c810 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,7 +4,6 @@
 - Added incremental infrastructure that allows pushing and popping constraints to/from the InputQuery.
 - Dropped support for parsing the TensorFlow network format. The newest Marabou version that supports TensorFlow is at commit 190555573e4702.
 - Fixed a bug in the parsing of `transpose` nodes in the command-line C++ parser.
-- Implemented forward-backward abstract interpretation, symbolic bound tightening, interval arithmetic and simulations for all activation functions.
 - Added the BaBSR heuristic as a new branching strategy for ReLU splitting.

## Version 2.0.0
diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp
index f9a074f07..b84f9e4a5 100644
--- a/src/configuration/GlobalConfiguration.cpp
+++ b/src/configuration/GlobalConfiguration.cpp
@@ -71,7 +71,6 @@ const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1;
 const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true;
 const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001;
-const double GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT = 0.00000001;
 const double GlobalConfiguration::SIGMOID_CUTOFF_CONSTANT = 20;
diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h
index 3104edf79..dfa5814bc 100644
--- a/src/configuration/GlobalConfiguration.h
+++ b/src/configuration/GlobalConfiguration.h
@@ -195,9 +195,8 @@ class GlobalConfiguration
       Symbolic bound tightening options
     */
-    // Symbolic tightening, LP rounding constants
+    // Symbolic tightening rounding constant
     static const double SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT;
-    static const double LP_TIGHTENING_ROUNDING_CONSTANT;
     static const double SIGMOID_CUTOFF_CONSTANT;
diff --git a/src/configuration/OptionParser.cpp b/src/configuration/OptionParser.cpp
index 6864e9fc7..358c11734 100644
--- a/src/configuration/OptionParser.cpp
+++ b/src/configuration/OptionParser.cpp
@@ -267,7 +267,7 @@ void OptionParser::initialize()
         &( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ) )
         ->default_value( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ),
       "The MILP solver bound tightening type: "
-      "lp/backward-once/backward-converge/lp-inc/milp/milp-inc/iter-prop/none." )
+      "lp/fb-once/fb-converge/lp-inc/milp/milp-inc/iter-prop/none." )
 #endif
     ;
diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp
index 86f45ecd0..311daef54 100644
--- a/src/engine/Engine.cpp
+++ b/src/engine/Engine.cpp
@@ -1579,11 +1579,7 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery )
     // TODO: Remove this block after getting ready to support sigmoid with MILP Bound
     // Tightening.
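    // Illustration (hypothetical invocation, not part of this diff): after this
    // change, any MILP-solver bound tightening mode other than NONE hits the
    // guard below whenever the query contains nonlinear (e.g., Sigmoid)
    // constraints. Assuming the type is selected via the --milp-tightening flag
    // (the exact flag name comes from OptionParser, not this hunk), a run such as
    //     Marabou network.onnx property.txt --milp-tightening=lp
    // on a sigmoid network now fails fast with FEATURE_NOT_YET_SUPPORTED,
    // where previously only the milp/milp-inc/iter-prop modes did.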
- if ( ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING || - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL || - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION ) && + if ( _milpSolverBoundTighteningType != MILPSolverBoundTighteningType::NONE && _preprocessedQuery->getNonlinearConstraints().size() > 0 ) throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED, "Marabou doesn't support sigmoid with MILP Bound Tightening" ); diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index c7619f4d1..552fa6461 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -15,7 +15,6 @@ #include "LPFormulator.h" -#include "DeepPolySoftmaxElement.h" #include "GurobiWrapper.h" #include "InfeasibleQueryException.h" #include "Layer.h" @@ -323,15 +322,8 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map TimeUtils::timePassed( gurobiStart, gurobiEnd ) / 1000000 ) .ascii() ); - // Clean up clearSolverQueue( freeSolvers ); - if ( threads ) - { - delete[] threads; - threads = NULL; - } - if ( infeasible ) throw InfeasibleQueryException(); } @@ -500,16 +492,7 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, threads[i].interrupt(); threads[i].join(); } - - // Clean up clearSolverQueue( freeSolvers ); - - if ( threads ) - { - delete[] threads; - threads = NULL; - } - throw InfeasibleQueryException(); } @@ -586,9 +569,7 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & { LPFormulator_LOG( Stringf( "Computing upperbound..." ).ascii() ); double ub = optimizeWithGurobi( - *gurobi, MinOrMax::MAX, variableName, cutoffValue, &infeasible ) + - GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT; - ; + *gurobi, MinOrMax::MAX, variableName, cutoffValue, &infeasible ); LPFormulator_LOG( Stringf( "Upperbound computed %f", ub ).ascii() ); // Store the new bound if it is tighter @@ -618,8 +599,7 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & LPFormulator_LOG( Stringf( "Computing lowerbound..." 
).ascii() ); gurobi->reset(); double lb = optimizeWithGurobi( - *gurobi, MinOrMax::MIN, variableName, cutoffValue, &infeasible ) - - GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT; + *gurobi, MinOrMax::MIN, variableName, cutoffValue, &infeasible ); LPFormulator_LOG( Stringf( "Lowerbound computed: %f", lb ).ascii() ); // Store the new bound if it is tighter if ( lb > currentLb ) @@ -690,6 +670,7 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers } } + void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -708,18 +689,10 @@ void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, addWeightedSumLayerToLpRelaxation( gurobi, layer, createVariables ); break; - case Layer::ROUND: - addRoundLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - case Layer::LEAKY_RELU: addLeakyReluLayerToLpRelaxation( gurobi, layer, createVariables ); break; - case Layer::ABSOLUTE_VALUE: - addAbsoluteValueLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - case Layer::SIGN: addSignLayerToLpRelaxation( gurobi, layer, createVariables ); break; @@ -728,18 +701,6 @@ void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, addMaxLayerToLpRelaxation( gurobi, layer, createVariables ); break; - case Layer::SIGMOID: - addSigmoidLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - - case Layer::SOFTMAX: - addSoftmaxLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - - case Layer::BILINEAR: - addBilinearLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - default: throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, "LPFormulator" ); break; @@ -846,262 +807,6 @@ void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi, } } - -void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = FloatUtils::round( sourceValue ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - double ub = std::min( FloatUtils::round( sourceUb ), layer->getUb( i ) ); - double lb = std::max( FloatUtils::round( sourceLb ), layer->getLb( i ) ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // If u = l: y = round(u) - if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) - { - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, ub ); - } - - else - { - List terms; - // y <= x + 0.5, i.e. 
y - x <= 0.5 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, 0.5 ); - - // y >= x - 0.5, i.e. y - x >= -0.5 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, -0.5 ); - } - } - } -} - - -void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = sourceValue > 0 ? sourceValue : -sourceValue; - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - if ( !FloatUtils::isNegative( sourceLb ) ) - { - // The AbsoluteValue is active, y = x - if ( sourceLb < 0 ) - sourceLb = 0; - - double ub = std::min( sourceUb, layer->getUb( i ) ); - double lb = std::max( sourceLb, layer->getLb( i ) ); - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - double ub = std::min( -sourceLb, layer->getUb( i ) ); - double lb = std::max( -sourceUb, layer->getLb( i ) ); - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // The AbsoluteValue is inactive, y = -x, i.e. y + x = 0 - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else - { - double ub = std::min( std::max( -sourceLb, sourceUb ), layer->getUb( i ) ); - double lb = std::max( 0.0, layer->getLb( i ) ); - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - /* - The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). 
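-                   As an illustration (hypothetical numbers, not from the
-                   original source): for x in [-1, 3], max(-lb, ub) =
-                   max(1, 3) = 3, so the two constraints below encode
-                   0 <= y <= 3, possibly clipped further by the layer's own
-                   stored bounds.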
- */ - // y >= 0 - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // y <= max(-lb, ub) - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addLeqConstraint( terms, ub ); - } - } - } -} - - -void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = SigmoidConstraint::sigmoid( sourceValue ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - - double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); - double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); - - double ub = std::min( sourceUbSigmoid, layer->getUb( i ) ); - double lb = std::max( sourceLbSigmoid, layer->getLb( i ) ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // If u = l: y = sigmoid(u) - if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) - { - List terms; - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, ub ); - } - - else - { - List terms; - double lambda = ( ub - lb ) / ( sourceUb - sourceLb ); - double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), - SigmoidConstraint::sigmoidDerivative( sourceUb ) ); - - // update lower bound - if ( FloatUtils::isPositive( sourceLb ) ) - { - // y >= lambda * (x - l) + sigmoid(lb), i.e. y - lambda * x >= sigmoid(lb) - - // lambda * l - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, sourceLbSigmoid - sourceLb * lambda ); - } - - else - { - // y >= lambda' * (x - l) + sigmoid(lb), i.e. y - lambda' * x >= sigmoid(lb) - - // lambda' * l - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, sourceLbSigmoid - sourceLb * lambdaPrime ); - } - - // update upper bound - if ( !FloatUtils::isPositive( sourceUb ) ) - { - // y <= lambda * (x - u) + sigmoid(ub), i.e. 
y - lambda * x <= sigmoid(ub) - - // lambda * u - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, sourceUbSigmoid - sourceUb * lambda ); - } - else - { - // y <= lambda' * (x - u) + sigmoid(ub), i.e. y - lambda' * x <= sigmoid(ub) - - // lambda' * u - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, sourceUbSigmoid - sourceUb * lambdaPrime ); - } - } - } - } -} - - void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1185,7 +890,6 @@ void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1267,286 +971,6 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, } } - -void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( layer->neuronEliminated( i ) ) - continue; - - Set handledInputNeurons; - List sources = layer->getActivationSources( i ); - - Vector sourceLbs; - Vector sourceUbs; - Vector sourceMids; - Vector targetLbs; - Vector targetUbs; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceMids.append( ( sourceLb + sourceUb ) / 2 ); - targetLbs.append( layer->getLb( i ) ); - targetUbs.append( layer->getUb( i ) ); - } - - // Find the index of i in the softmax - unsigned index = 0; - for ( const auto &source : sources ) - { - if ( handledInputNeurons.exists( source._neuron ) ) - ++index; - else - { - handledInputNeurons.insert( source._neuron ); - break; - } - } - - double ub = - std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), - layer->getUb( i ) ); - double lb = - std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), - layer->getLb( i ) ); - targetLbs[index] = lb; - targetUbs[index] = ub; - - unsigned targetVariable = layer->neuronToVariable( i ); - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - double bias; - SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); - - - List terms; - if ( FloatUtils::areEqual( lb, ub ) ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, ub ); - } - else - { - // Compute symbolic bound - if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) - { - bool useLSE2 = false; - for ( const auto &lb : targetLbs ) - { - if ( lb > 
GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) - useLSE2 = true; - } - unsigned inputIndex = 0; - if ( !useLSE2 ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound( - sourceMids, sourceLbs, sourceUbs, index ); - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = DeepPolySoftmaxElement::dLSELowerBound( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( - GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addGeqConstraint( terms, bias ); - } - else - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound2( - sourceMids, sourceLbs, sourceUbs, index ); - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = DeepPolySoftmaxElement::dLSELowerBound2( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( - GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addGeqConstraint( terms, bias ); - } - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSEUpperBound( - sourceMids, targetLbs, targetUbs, index ); - inputIndex = 0; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = DeepPolySoftmaxElement::dLSEUpperbound( - sourceMids, targetLbs, targetUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dudj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addLeqConstraint( terms, bias ); - } - else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = - DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); - unsigned inputIndex = 0; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = DeepPolySoftmaxElement::dERLowerBound( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addGeqConstraint( terms, bias ); - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = - DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); - inputIndex = 0; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = 
sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = DeepPolySoftmaxElement::dERUpperBound( - sourceMids, targetLbs, targetUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dudj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addLeqConstraint( terms, bias ); - } - } - } -} - -void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - Vector sourceLbs; - Vector sourceUbs; - Vector sourceValues; - Vector sourceNeurons; - bool allConstant = true; - for ( const auto &sourceIndex : sources ) - { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); - - sourceNeurons.append( sourceNeuron ); - sourceLbs.append( sourceLb ); - sourceUbs.append( sourceUb ); - - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) - { - allConstant = false; - } - else - { - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - sourceValues.append( sourceValue ); - } - } - - if ( allConstant ) - { - // If the both source neurons have been eliminated, this neuron is constant - double targetValue = sourceValues[0] * sourceValues[1]; - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - continue; - } - - double lb = FloatUtils::infinity(); - double ub = FloatUtils::negativeInfinity(); - List values = { sourceLbs[0] * sourceLbs[1], - sourceLbs[0] * sourceUbs[1], - sourceUbs[0] * sourceLbs[1], - sourceUbs[0] * sourceUbs[1] }; - for ( const auto &v : values ) - { - if ( v < lb ) - lb = v; - if ( v > ub ) - ub = v; - } - - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // Lower bound: out >= l_y * x + l_x * y - l_x * l_y - List terms; - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( - -sourceLbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( - -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); - gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); - - // Upper bound: out <= u_y * x + l_x * y - l_x * u_y - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( - -sourceUbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( - -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); - gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); - } - } -} - - void LPFormulator::addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1650,7 +1074,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, if ( !FloatUtils::isNegative( sourceLb ) ) { - // The 
LeakyReLU is active, y = x + // The ReLU is active, y = x List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1659,7 +1083,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, } else if ( !FloatUtils::isPositive( sourceUb ) ) { - // The LeakyReLU is inactive, y = alpha * x + // The ReLU is inactive, y = alpha * x List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); @@ -1672,7 +1096,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; /* - The phase of this LeakyReLU is not yet fixed. + The phase of this ReLU is not yet fixed. For y = LeakyReLU(x), we add the following triangular relaxation: 1. y >= alpha * x 2. y >= x diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index cee0abbb6..9a12a9e61 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -104,25 +104,6 @@ class LPFormulator : public ParallelSolver void addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - void - addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - - void addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ); - - void addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ); - - void addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ); - - void addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ); - void addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 08e690053..01b6a6aaf 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -2,7 +2,7 @@ /*! \file Layer.cpp ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Ido Shmuel + ** Guy Katz ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. 
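A worked check of the triangular LeakyReLU relaxation from the LPFormulator.cpp hunk above, with hypothetical numbers (the slope and bounds are illustrative, not taken from the diff): for alpha = 0.1 and x in [-2, 3], the upper facet is the line through (-2, -0.2) and (3, 3), i.e. y <= 0.64 x + 1.08. Here 0.64 = (3 - 0.1 * (-2)) / 5, and 1.08 = ((0.1 - 1) * 3 * (-2)) / 5 matches the bias formula ((slope - 1) * sourceUb * sourceLb) / width retained in the code; together with y >= alpha * x and y >= x, these three constraints form the relaxation listed in the comment.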
@@ -305,7 +305,7 @@ void Layer::computeSimulations() unsigned sourceSize = sourceLayerEntry.second; const double *weights = _layerToWeights[sourceLayerEntry.first]; - for ( unsigned i = 0; i < _size; ++i ) + for ( unsigned i = 0; i < _size; i++ ) { for ( unsigned j = 0; j < simulationSize; ++j ) _simulations[i][j] = _bias[i]; @@ -399,63 +399,6 @@ void Layer::computeSimulations() _simulations[i][j] = 1 / ( 1 + std::exp( -simulations.get( j ) ) ); } } - else if ( _type == ROUND ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Vector &simulations = - ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) ).get( i ); - for ( unsigned j = 0; j < simulationSize; ++j ) - _simulations[i][j] = FloatUtils::round( simulations.get( j ) ); - } - } - else if ( _type == SOFTMAX ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - for ( unsigned j = 0; j < simulationSize; ++j ) - { - _simulations[i][j] = FloatUtils::negativeInfinity(); - - Vector inputs; - Vector outputs; - unsigned outputIndex = 0; - unsigned index = 0; - - for ( const auto &input : _neuronToActivationSources[i] ) - { - if ( input._neuron == i ) - outputIndex = index; - double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) ) - .get( input._neuron ) - .get( j ); - inputs.append( value ); - ++index; - } - - SoftmaxConstraint::softmax( inputs, outputs ); - _simulations[i][j] = outputs[outputIndex]; - } - } - } - else if ( _type == BILINEAR ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - for ( unsigned j = 0; j < simulationSize; ++j ) - { - _simulations[i][j] = 1; - for ( const auto &input : _neuronToActivationSources[i] ) - { - double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) ) - .get( input._neuron ) - .get( j ); - _simulations[i][j] *= value; - } - } - } - } else { printf( "Error! Neuron type %u unsupported\n", _type ); @@ -725,29 +668,7 @@ void Layer::computeIntervalArithmeticBounds() computeIntervalArithmeticBoundsForSign(); break; - case ROUND: - computeIntervalArithmeticBoundsForRound(); - break; - - case LEAKY_RELU: - computeIntervalArithmeticBoundsForLeakyRelu(); - break; - - case SIGMOID: - computeIntervalArithmeticBoundsForSigmoid(); - break; - case MAX: - computeIntervalArithmeticBoundsForMax(); - break; - - case SOFTMAX: - computeIntervalArithmeticBoundsForSoftmax(); - break; - - case BILINEAR: - computeIntervalArithmeticBoundsForBilinear(); - break; default: printf( "Error! 
Activation type %u unsupported\n", _type ); @@ -801,13 +722,13 @@ void Layer::computeIntervalArithmeticBoundsForWeightedSum() if ( _eliminatedNeurons.exists( i ) ) continue; - if ( _lb[i] < newLb[i] ) + if ( newLb[i] > _lb[i] ) { _lb[i] = newLb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( _ub[i] > newUb[i] ) + if ( newUb[i] < _ub[i] ) { _ub[i] = newUb[i]; _layerOwner->receiveTighterBound( @@ -835,13 +756,13 @@ void Layer::computeIntervalArithmeticBoundsForRelu() if ( lb < 0 ) lb = 0; - if ( _lb[i] < lb ) + if ( lb > _lb[i] ) { _lb[i] = lb; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( _ub[i] > ub ) + if ( ub < _ub[i] ) { _ub[i] = ub; _layerOwner->receiveTighterBound( @@ -865,13 +786,13 @@ void Layer::computeIntervalArithmeticBoundsForAbs() if ( lb > 0 ) { - if ( _lb[i] < lb ) + if ( lb > _lb[i] ) { _lb[i] = lb; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( _ub[i] > ub ) + if ( ub < _ub[i] ) { _ub[i] = ub; _layerOwner->receiveTighterBound( @@ -880,13 +801,13 @@ void Layer::computeIntervalArithmeticBoundsForAbs() } else if ( ub < 0 ) { - if ( _lb[i] < -ub ) + if ( -ub > _lb[i] ) { _lb[i] = -ub; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( _ub[i] > -lb ) + if ( -lb < _ub[i] ) { _ub[i] = -lb; _layerOwner->receiveTighterBound( @@ -903,7 +824,7 @@ void Layer::computeIntervalArithmeticBoundsForAbs() Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( _ub[i] > FloatUtils::max( ub, -lb ) ) + if ( FloatUtils::max( ub, -lb ) < _ub[i] ) { _ub[i] = FloatUtils::max( ub, -lb ); _layerOwner->receiveTighterBound( @@ -926,682 +847,460 @@ void Layer::computeIntervalArithmeticBoundsForSign() double lb = sourceLayer->getLb( sourceIndex._neuron ); double ub = sourceLayer->getUb( sourceIndex._neuron ); - double new_lb; - double new_ub; - if ( !FloatUtils::isNegative( lb ) ) { - new_lb = 1; - new_ub = 1; + _lb[i] = 1; + _ub[i] = 1; } else if ( FloatUtils::isNegative( ub ) ) { - new_lb = -1; - new_ub = -1; + _lb[i] = -1; + _ub[i] = -1; } else { - new_lb = -1; - new_ub = 1; + _lb[i] = -1; + _ub[i] = 1; } + } +} - /* - We now have the tightest bounds we can for the relu - variable. If they are tigheter than what was previously - known, store them. - */ - if ( _lb[i] < new_lb ) - { - _lb[i] = new_lb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } +void Layer::computeSymbolicBounds() +{ + switch ( _type ) + { + case INPUT: + comptueSymbolicBoundsForInput(); + break; - if ( _ub[i] > new_ub ) - { - _ub[i] = new_ub; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } + case WEIGHTED_SUM: + computeSymbolicBoundsForWeightedSum(); + break; + + case RELU: + computeSymbolicBoundsForRelu(); + break; + + case SIGN: + computeSymbolicBoundsForSign(); + break; + + case ABSOLUTE_VALUE: + computeSymbolicBoundsForAbsoluteValue(); + break; + + default: + computeSymbolicBoundsDefault(); + break; } } -void Layer::computeIntervalArithmeticBoundsForLeakyRelu() +void Layer::computeSymbolicBoundsDefault() { - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; + // This is the default operation, for layers that are not + // supported yet. The "symbolic" bounds computed are just the + // concrete bounds. 
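+    // For example (hypothetical neuron, not part of this change): a neuron
+    // with concrete bounds [-2, 5] gets all-zero symbolic coefficients and
+    // the biases lower = -2 and upper = 5, i.e. the constant bounds
+    // -2 <= y <= 5 regardless of the input assignment.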
- NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - double lb = sourceLayer->getLb( sourceIndex._neuron ); - double ub = sourceLayer->getUb( sourceIndex._neuron ); + for ( unsigned i = 0; i < _size; ++i ) + { + double lb; + double ub; - if ( lb > 0 ) - { - if ( _lb[i] < lb ) - { - _lb[i] = lb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > ub ) - { - _ub[i] = ub; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } - else if ( ub < 0 ) + if ( _eliminatedNeurons.exists( i ) ) { - if ( _lb[i] < _alpha * lb ) - { - _lb[i] = _alpha * lb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > _alpha * ub ) - { - _ub[i] = _alpha * ub; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } + lb = _eliminatedNeurons[i]; + ub = _eliminatedNeurons[i]; } else { - // lb < 0 < ub - if ( _lb[i] < _alpha * lb ) - { - _lb[i] = _alpha * lb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > ub ) - { - _ub[i] = ub; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } + lb = _lb[i]; + ub = _ub[i]; } + + _symbolicLowerBias[i] = lb; + _symbolicUpperBias[i] = ub; + + _symbolicLbOfLb[i] = lb; + _symbolicUbOfLb[i] = ub; + _symbolicLbOfUb[i] = lb; + _symbolicUbOfUb[i] = ub; } } -void Layer::computeIntervalArithmeticBoundsForSigmoid() +void Layer::comptueSymbolicBoundsForInput() { + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + // For the input layer, the bounds are just the identity polynomials for ( unsigned i = 0; i < _size; ++i ) { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - double lb = sourceLayer->getLb( sourceIndex._neuron ); - double ub = sourceLayer->getUb( sourceIndex._neuron ); + _symbolicLb[_size * i + i] = 1; + _symbolicUb[_size * i + i] = 1; - double lbSigmoid = SigmoidConstraint::sigmoid( lb ); - double ubSigmoid = SigmoidConstraint::sigmoid( ub ); + _symbolicLowerBias[i] = 0; + _symbolicUpperBias[i] = 0; + double lb = _lb[i]; + double ub = _ub[i]; - if ( _lb[i] < lbSigmoid ) - { - _lb[i] = lbSigmoid; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > ubSigmoid ) + if ( _eliminatedNeurons.exists( i ) ) { - _ub[i] = ubSigmoid; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + lb = _eliminatedNeurons[i]; + ub = _eliminatedNeurons[i]; } + + _symbolicLbOfLb[i] = lb; + _symbolicUbOfLb[i] = ub; + _symbolicLbOfUb[i] = lb; + _symbolicUbOfUb[i] = ub; } } - -void Layer::computeIntervalArithmeticBoundsForRound() +void Layer::computeSymbolicBoundsForRelu() { + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + for ( unsigned i = 0; i < _size; ++i ) { if ( _eliminatedNeurons.exists( i ) ) - continue; - - NeuronIndex sourceIndex = 
*_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - double lb = sourceLayer->getLb( sourceIndex._neuron ); - double ub = sourceLayer->getUb( sourceIndex._neuron ); - - double lbRound = FloatUtils::round( lb ); - double ubRound = FloatUtils::round( ub ); - - - if ( _lb[i] < lbRound ) - { - _lb[i] = lbRound; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > ubRound ) { - _ub[i] = ubRound; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; } } -} - -void Layer::computeIntervalArithmeticBoundsForMax() -{ for ( unsigned i = 0; i < _size; ++i ) { if ( _eliminatedNeurons.exists( i ) ) continue; - ASSERT( _neuronToActivationSources.exists( i ) ); - List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + /* + There are two ways we can determine that a ReLU has become fixed: - NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); - double maxLowerBound = FloatUtils::negativeInfinity(); - double maxUpperBound = FloatUtils::negativeInfinity(); + 1. If the ReLU's variable has been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + PhaseStatus reluPhase = PHASE_NOT_FIXED; - Map sourceLbs; - Map sourceUbs; - unsigned counter = 0; - for ( const auto &sourceIndex : sources ) - { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); + // Has the f variable been eliminated or fixed? + if ( FloatUtils::isPositive( _lb[i] ) ) + reluPhase = RELU_PHASE_ACTIVE; + else if ( FloatUtils::isZero( _ub[i] ) ) + reluPhase = RELU_PHASE_INACTIVE; - sourceLbs[sourceIndex] = sourceLb; - sourceUbs[sourceIndex] = sourceUb; + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - if ( maxLowerBound < sourceLb ) - { - indexOfMaxLowerBound = sourceIndex; - maxLowerBound = sourceLb; - } - if ( maxUpperBound < sourceUb ) - { - maxUpperBound = sourceUb; - } - ++counter; - } + /* + A ReLU initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - // The phase is fixed if the lower-bound of a source variable x_b is - // larger than the upper-bounds of the other source variables. 
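-        // Worked example (hypothetical intervals, not from the diff): with
-        // sources x1 in [2, 4], x2 in [-1, 1] and x3 in [0, 1.5], the largest
-        // lower bound is 2, coming from x1; since no other source's upper
-        // bound exceeds 2, the phase is fixed and 2 <= x_f <= 4.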
- bool phaseFixed = true; - for ( const auto &sourceIndex : sources ) + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - if ( sourceIndex != indexOfMaxLowerBound && - FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) ) - { - phaseFixed = false; - break; - } + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - if ( phaseFixed ) - { - // Phase fixed - // Concrete bound: lb_b <= x_f <= ub_b - if ( _lb[i] < maxLowerBound ) - { - _lb[i] = maxLowerBound; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - if ( _ub[i] > sourceUbs[indexOfMaxLowerBound] ) - { - _ub[i] = sourceUbs[indexOfMaxLowerBound]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? + if ( !FloatUtils::isNegative( sourceLb ) ) + { + reluPhase = RELU_PHASE_ACTIVE; } - else + else if ( !FloatUtils::isPositive( sourceUb ) ) { - // MaxPool not fixed - // Concrete bounds: lb_b <= x_f <= maxUpperBound - if ( _lb[i] < maxLowerBound ) - { - _lb[i] = maxLowerBound; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } + reluPhase = RELU_PHASE_INACTIVE; + } + + if ( reluPhase == PHASE_NOT_FIXED ) + { + // If we got here, we know that lbLb < 0 and ubUb + // > 0 There are four possible cases, depending on + // whether ubLb and lbUb are negative or positive + // (see Neurify paper, page 14). 
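+            // Illustrative numbers (not from this change): if the upper
+            // bound's concrete range is [lbOfUb, ubOfUb] = [-1, 3], the code
+            // below scales the symbolic upper bound by 3 / (3 - (-1)) = 0.75
+            // and adds 0.75 = -(-1) * 3 / 4 to its bias, i.e. the triangle
+            // relaxation y <= 0.75 * (x + 1).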
- if ( _ub[i] > maxUpperBound ) + // Upper bound + if ( _symbolicLbOfUb[i] <= 0 ) { - _ub[i] = maxUpperBound; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } - } -} + // lbOfUb[i] < 0 < ubOfUb[i] + // Concretize the upper bound using the Ehler's-like approximation + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] = _symbolicUb[j * _size + i] * _symbolicUbOfUb[i] / + ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); -void Layer::computeIntervalArithmeticBoundsForSoftmax() -{ - Set handledInputNeurons; - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] = _symbolicUpperBias[i] * _symbolicUbOfUb[i] / + ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); + _symbolicUpperBias[i] -= _symbolicLbOfUb[i] * _symbolicUbOfUb[i] / + ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); + } - ASSERT( _neuronToActivationSources.exists( i ) ); - List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + // Lower bound + if ( _symbolicUbOfLb[i] <= 0 ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicLb[j * _size + i] = 0; - ASSERT( sourceLayer->getSize() == _size ); + _symbolicLowerBias[i] = 0; + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] / + ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); - Vector sourceLbs; - Vector sourceUbs; - for ( const auto &sourceIndex : sources ) - { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); + _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] / + ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); + } - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + _symbolicLbOfLb[i] = 0; } - - // Find the index of i in the softmax - unsigned index = 0; - for ( const auto &sourceIndex : sources ) + else { - if ( handledInputNeurons.exists( sourceIndex._neuron ) ) - ++index; + // The phase of this ReLU is fixed! + if ( reluPhase == RELU_PHASE_ACTIVE ) + { + // Active ReLU, bounds are propagated as is + } else { - handledInputNeurons.insert( sourceIndex._neuron ); - break; + // Inactive ReLU, returns zero + _symbolicLbOfLb[i] = 0; + _symbolicUbOfLb[i] = 0; + _symbolicLbOfUb[i] = 0; + _symbolicUbOfUb[i] = 0; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + _symbolicLowerBias[i] = 0; + _symbolicUpperBias[i] = 0; } } - double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); - double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); - if ( _lb[i] < lb ) + if ( _symbolicLbOfUb[i] < 0 ) + _symbolicLbOfUb[i] = 0; + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. 
+ */ + if ( _lb[i] < _symbolicLbOfLb[i] ) { - _lb[i] = lb; + _lb[i] = _symbolicLbOfLb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( _ub[i] > ub ) + + if ( _ub[i] > _symbolicUbOfUb[i] ) { - _ub[i] = ub; + _ub[i] = _symbolicUbOfUb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } } } -void Layer::computeIntervalArithmeticBoundsForBilinear() +void Layer::computeSymbolicBoundsForSign() { + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + for ( unsigned i = 0; i < _size; ++i ) { + // Eliminate neurons are skipped if ( _eliminatedNeurons.exists( i ) ) - continue; - - ASSERT( _neuronToActivationSources.exists( i ) ); - List sources = getActivationSources( i ); - ASSERT( sources.size() == 2 ); - - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - Vector sourceLbs; - Vector sourceUbs; - Vector sourceValues; - bool allConstant = true; - for ( const auto &sourceIndex : sources ) { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; - sourceLbs.append( sourceLb ); - sourceUbs.append( sourceUb ); + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; - if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) - { - allConstant = false; - } - else - { - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - sourceValues.append( sourceValue ); - } + continue; } - if ( allConstant ) - { - // If the both source neurons have been eliminated, this neuron is constant - double value = sourceValues[0] * sourceValues[1]; + /* + There are two ways we can determine that a Sign has become fixed: - if ( _lb[i] < value ) - { - _lb[i] = value; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } + 1. If the Sign's variable has been externally fixed + 2. lbLb >= 0 (Positive) or ubUb < 0 (Negative) + */ + PhaseStatus signPhase = PHASE_NOT_FIXED; - if ( _ub[i] > value ) - { - _ub[i] = value; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - continue; - } + // Has the f variable been eliminated or fixed? 
+ if ( !FloatUtils::isNegative( _lb[i] ) ) + signPhase = SIGN_PHASE_POSITIVE; + else if ( FloatUtils::isNegative( _ub[i] ) ) + signPhase = SIGN_PHASE_NEGATIVE; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A Sign initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - double lb = FloatUtils::infinity(); - double ub = FloatUtils::negativeInfinity(); - List values = { sourceLbs[0] * sourceLbs[1], - sourceLbs[0] * sourceUbs[1], - sourceUbs[0] * sourceLbs[1], - sourceUbs[0] * sourceUbs[1] }; - for ( const auto &v : values ) + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - if ( v < lb ) - lb = v; - if ( v > ub ) - ub = v; + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - if ( _lb[i] < lb ) + // Has the b variable been fixed? 
+ if ( !FloatUtils::isNegative( sourceLb ) ) { - _lb[i] = lb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + signPhase = SIGN_PHASE_POSITIVE; } - - if ( _ub[i] > ub ) + else if ( FloatUtils::isNegative( sourceUb ) ) { - _ub[i] = ub; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + signPhase = SIGN_PHASE_NEGATIVE; } - } -} -void Layer::computeSymbolicBounds() -{ - switch ( _type ) - { - case INPUT: - computeSymbolicBoundsForInput(); - break; + if ( signPhase == PHASE_NOT_FIXED ) + { + // If we got here, we know that lbLb < 0 and ubUb + // > 0 - case WEIGHTED_SUM: - computeSymbolicBoundsForWeightedSum(); - break; + // Upper bound + if ( !FloatUtils::isNegative( _symbolicLbOfUb[i] ) ) + { + // The upper bound is strictly positive - turns into + // the constant 1 - case RELU: - computeSymbolicBoundsForRelu(); - break; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] = 0; - case SIGN: - computeSymbolicBoundsForSign(); - break; + _symbolicUpperBias[i] = 1; - case ABSOLUTE_VALUE: - computeSymbolicBoundsForAbsoluteValue(); - break; + _symbolicUbOfUb[i] = 1; + _symbolicLbOfUb[i] = 1; + } + else + { + // The upper bound's phase is not fixed, use the + // parallelogram approximation + double factor = -2.0 / _symbolicLbOfLb[i]; - case LEAKY_RELU: - computeSymbolicBoundsForLeakyRelu(); - break; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] *= factor; - case ROUND: - computeSymbolicBoundsForRound(); - break; - case SIGMOID: - computeSymbolicBoundsForSigmoid(); - break; + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= factor; + _symbolicUpperBias[i] += 1; - case MAX: - computeSymbolicBoundsForMax(); - break; - - case SOFTMAX: - computeSymbolicBoundsForSoftmax(); - break; - - case BILINEAR: - computeSymbolicBoundsForBilinear(); - break; - - default: - computeSymbolicBoundsDefault(); - break; - } -} - -void Layer::computeSymbolicBoundsDefault() -{ - // This is the default operation, for layers that are not - // supported yet. The "symbolic" bounds computed are just the - // concrete bounds. 
- - std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); - std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - - for ( unsigned i = 0; i < _size; ++i ) - { - double lb; - double ub; - - if ( _eliminatedNeurons.exists( i ) ) - { - lb = _eliminatedNeurons[i]; - ub = _eliminatedNeurons[i]; - } - else - { - lb = _lb[i]; - ub = _ub[i]; - } - - _symbolicLowerBias[i] = lb; - _symbolicUpperBias[i] = ub; - - _symbolicLbOfLb[i] = lb; - _symbolicUbOfLb[i] = ub; - _symbolicLbOfUb[i] = lb; - _symbolicUbOfUb[i] = ub; - } -} - -void Layer::computeSymbolicBoundsForInput() -{ - std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); - std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - - // For the input layer, the bounds are just the identity polynomials - for ( unsigned i = 0; i < _size; ++i ) - { - _symbolicLb[_size * i + i] = 1; - _symbolicUb[_size * i + i] = 1; - - _symbolicLowerBias[i] = 0; - _symbolicUpperBias[i] = 0; - - double lb = _lb[i]; - double ub = _ub[i]; - - if ( _eliminatedNeurons.exists( i ) ) - { - lb = _eliminatedNeurons[i]; - ub = _eliminatedNeurons[i]; - } - - _symbolicLbOfLb[i] = lb; - _symbolicUbOfLb[i] = ub; - _symbolicLbOfUb[i] = lb; - _symbolicUbOfUb[i] = ub; - } -} - -void Layer::computeSymbolicBoundsForRelu() -{ - std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); - std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - { - _symbolicLowerBias[i] = _eliminatedNeurons[i]; - _symbolicUpperBias[i] = _eliminatedNeurons[i]; - - _symbolicLbOfLb[i] = _eliminatedNeurons[i]; - _symbolicUbOfLb[i] = _eliminatedNeurons[i]; - _symbolicLbOfUb[i] = _eliminatedNeurons[i]; - _symbolicUbOfUb[i] = _eliminatedNeurons[i]; - } - } - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - /* - There are two ways we can determine that a ReLU has become fixed: - - 1. If the ReLU's variable has been externally fixed - 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) - */ - PhaseStatus reluPhase = PHASE_NOT_FIXED; - - // Has the f variable been eliminated or fixed? 
- if ( FloatUtils::isPositive( _lb[i] ) ) - reluPhase = RELU_PHASE_ACTIVE; - else if ( FloatUtils::isZero( _ub[i] ) ) - reluPhase = RELU_PHASE_INACTIVE; - - ASSERT( _neuronToActivationSources.exists( i ) ); - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - /* - A ReLU initially "inherits" the symbolic bounds computed - for its input variable - */ - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; - _symbolicUb[j * _size + i] = - sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; - } - _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; - _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - - double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); - double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - - _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); - _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); - _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); - _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - - // Has the b variable been fixed? - if ( !FloatUtils::isNegative( sourceLb ) ) - { - reluPhase = RELU_PHASE_ACTIVE; - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - reluPhase = RELU_PHASE_INACTIVE; - } - - if ( reluPhase == PHASE_NOT_FIXED ) - { - // If we got here, we know that lbLb < 0 and ubUb - // > 0 There are four possible cases, depending on - // whether ubLb and lbUb are negative or positive - // (see Neurify paper, page 14). 
- - // Upper bound - if ( _symbolicLbOfUb[i] <= 0 ) - { - // lbOfUb[i] < 0 < ubOfUb[i] - // Concretize the upper bound using the Ehler's-like approximation - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - _symbolicUb[j * _size + i] = _symbolicUb[j * _size + i] * _symbolicUbOfUb[i] / - ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); - - // Do the same for the bias, and then adjust - _symbolicUpperBias[i] = _symbolicUpperBias[i] * _symbolicUbOfUb[i] / - ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); - _symbolicUpperBias[i] -= _symbolicLbOfUb[i] * _symbolicUbOfUb[i] / - ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); + _symbolicUbOfUb[i] = 1; + _symbolicLbOfUb[i] = -1; } // Lower bound - if ( _symbolicUbOfLb[i] <= 0 ) + if ( FloatUtils::isNegative( _symbolicUbOfLb[i] ) ) { + // The lower bound is strictly negative - turns into + // the constant -1 + for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] = 0; - _symbolicLowerBias[i] = 0; + _symbolicLowerBias[i] = -1; + + _symbolicUbOfLb[i] = -1; + _symbolicLbOfLb[i] = -1; } else { + // The lower bound's phase is not fixed, use the + // parallelogram approximation + double factor = 2.0 / _symbolicUbOfUb[i]; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) - _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] / - ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); + _symbolicLb[j * _size + i] *= factor; - _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] / - ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); - } + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= factor; + _symbolicLowerBias[i] -= 1; - _symbolicLbOfLb[i] = 0; + _symbolicUbOfLb[i] = 1; + _symbolicLbOfLb[i] = -1; + } } else { - // The phase of this ReLU is fixed! - if ( reluPhase == RELU_PHASE_ACTIVE ) - { - // Active ReLU, bounds are propagated as is - } - else - { - // Inactive ReLU, returns zero - _symbolicLbOfLb[i] = 0; - _symbolicUbOfLb[i] = 0; - _symbolicLbOfUb[i] = 0; - _symbolicUbOfUb[i] = 0; + // The phase of this Sign is fixed! + double constant = ( signPhase == SIGN_PHASE_POSITIVE ) ? 1 : -1; - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicUb[j * _size + i] = 0; - _symbolicLb[j * _size + i] = 0; - } + _symbolicLbOfLb[i] = constant; + _symbolicUbOfLb[i] = constant; + _symbolicLbOfUb[i] = constant; + _symbolicUbOfUb[i] = constant; - _symbolicLowerBias[i] = 0; - _symbolicUpperBias[i] = 0; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; } + + _symbolicLowerBias[i] = constant; + _symbolicUpperBias[i] = constant; } - if ( _symbolicLbOfUb[i] < 0 ) - _symbolicLbOfUb[i] = 0; + if ( _symbolicLbOfLb[i] < -1 ) + _symbolicLbOfLb[i] = -1; + if ( _symbolicUbOfUb[i] > 1 ) + _symbolicUbOfUb[i] = 1; /* - We now have the tightest bounds we can for the relu + We now have the tightest bounds we can for the sign variable. If they are tigheter than what was previously known, store them. 
*/ @@ -1621,14 +1320,13 @@ void Layer::computeSymbolicBoundsForRelu() } } -void Layer::computeSymbolicBoundsForSign() +void Layer::computeSymbolicBoundsForAbsoluteValue() { std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); for ( unsigned i = 0; i < _size; ++i ) { - // Eliminate neurons are skipped if ( _eliminatedNeurons.exists( i ) ) { _symbolicLowerBias[i] = _eliminatedNeurons[i]; @@ -1637,1576 +1335,104 @@ void Layer::computeSymbolicBoundsForSign() _symbolicLbOfLb[i] = _eliminatedNeurons[i]; _symbolicUbOfLb[i] = _eliminatedNeurons[i]; _symbolicLbOfUb[i] = _eliminatedNeurons[i]; - _symbolicUbOfUb[i] = _eliminatedNeurons[i]; - - continue; - } - - /* - There are two ways we can determine that a Sign has become fixed: - - 1. If the Sign's variable has been externally fixed - 2. lbLb >= 0 (Positive) or ubUb < 0 (Negative) - */ - PhaseStatus signPhase = PHASE_NOT_FIXED; - - // Has the f variable been eliminated or fixed? - if ( !FloatUtils::isNegative( _lb[i] ) ) - signPhase = SIGN_PHASE_POSITIVE; - else if ( FloatUtils::isNegative( _ub[i] ) ) - signPhase = SIGN_PHASE_NEGATIVE; - - ASSERT( _neuronToActivationSources.exists( i ) ); - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - /* - A Sign initially "inherits" the symbolic bounds computed - for its input variable - */ - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; - _symbolicUb[j * _size + i] = - sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; - } - _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; - _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - - double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); - double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - - _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); - _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); - _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); - _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - - // Has the b variable been fixed? 
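        // Illustration of the parallelogram approximation used for Sign below, under
        // the simplifying assumption that the source is a single input x in [l, u]
        // with l < 0 < u (numbers hypothetical): for u = 4, the unfixed lower bound
        // becomes the line through ( 0, -1 ) and ( u, 1 ):
        //
        //     sign( x ) >= ( 2.0 / u ) * x - 1 = 0.5 * x - 1;
        //
        // this is sound since for every x < 0 the line lies below -1 = sign( x ).
        // The unfixed upper bound mirrors it, using factor -2.0 / l and passing
        // through ( l, -1 ) and ( 0, 1 ).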
- if ( !FloatUtils::isNegative( sourceLb ) ) - { - signPhase = SIGN_PHASE_POSITIVE; - } - else if ( FloatUtils::isNegative( sourceUb ) ) - { - signPhase = SIGN_PHASE_NEGATIVE; - } - - if ( signPhase == PHASE_NOT_FIXED ) - { - PhaseStatus upperSignPhase = PHASE_NOT_FIXED; - PhaseStatus lowerSignPhase = PHASE_NOT_FIXED; - - // If we got here, we know that lbLb < 0 and ubUb - // > 0 - - // Upper bound - if ( !FloatUtils::isNegative( _symbolicLbOfUb[i] ) ) - { - // The upper bound is strictly positive - turns into - // the constant 1 - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - _symbolicUb[j * _size + i] = 0; - - _symbolicUpperBias[i] = 1; - - upperSignPhase = SIGN_PHASE_POSITIVE; - } - else - { - // The upper bound's phase is not fixed, use the - // parallelogram approximation - double factor = -2.0 / _symbolicLbOfLb[i]; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - _symbolicUb[j * _size + i] *= factor; - - - // Do the same for the bias, and then adjust - _symbolicUpperBias[i] *= factor; - _symbolicUpperBias[i] += 1; - } - - // Lower bound - if ( FloatUtils::isNegative( _symbolicUbOfLb[i] ) ) - { - // The lower bound is strictly negative - turns into - // the constant -1 - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - _symbolicLb[j * _size + i] = 0; - - _symbolicLowerBias[i] = -1; - - lowerSignPhase = SIGN_PHASE_NEGATIVE; - } - else - { - // The lower bound's phase is not fixed, use the - // parallelogram approximation - double factor = 2.0 / _symbolicUbOfUb[i]; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] *= factor; - } - - // Do the same for the bias, and then adjust - _symbolicLowerBias[i] *= factor; - _symbolicLowerBias[i] -= 1; - } - - if ( upperSignPhase == PHASE_NOT_FIXED ) - { - _symbolicUbOfUb[i] = 1; - _symbolicLbOfUb[i] = -1; - } - else - { - _symbolicUbOfUb[i] = 1; - _symbolicLbOfUb[i] = 1; - } - - if ( lowerSignPhase == PHASE_NOT_FIXED ) - { - _symbolicUbOfLb[i] = 1; - _symbolicLbOfLb[i] = -1; - } - else - { - _symbolicUbOfLb[i] = -1; - _symbolicLbOfLb[i] = -1; - } - } - else - { - // The phase of this Sign is fixed! - double constant = ( signPhase == SIGN_PHASE_POSITIVE ) ? 1 : -1; - - _symbolicLbOfLb[i] = constant; - _symbolicUbOfLb[i] = constant; - _symbolicLbOfUb[i] = constant; - _symbolicUbOfUb[i] = constant; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicUb[j * _size + i] = 0; - _symbolicLb[j * _size + i] = 0; - } - - _symbolicLowerBias[i] = constant; - _symbolicUpperBias[i] = constant; - } - - if ( _symbolicLbOfLb[i] < -1 ) - _symbolicLbOfLb[i] = -1; - if ( _symbolicUbOfUb[i] > 1 ) - _symbolicUbOfUb[i] = 1; - - /* - We now have the tightest bounds we can for the sign - variable. If they are tigheter than what was previously - known, store them. 
- */ - if ( _lb[i] < _symbolicLbOfLb[i] ) - { - _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } -} - -void Layer::computeSymbolicBoundsForAbsoluteValue() -{ - std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); - std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - { - _symbolicLowerBias[i] = _eliminatedNeurons[i]; - _symbolicUpperBias[i] = _eliminatedNeurons[i]; - - _symbolicLbOfLb[i] = _eliminatedNeurons[i]; - _symbolicUbOfLb[i] = _eliminatedNeurons[i]; - _symbolicLbOfUb[i] = _eliminatedNeurons[i]; - _symbolicUbOfUb[i] = _eliminatedNeurons[i]; - - continue; - } - - PhaseStatus absPhase = PHASE_NOT_FIXED; - - ASSERT( _neuronToActivationSources.exists( i ) ); - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; - _symbolicUb[j * _size + i] = - sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; - } - - _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; - _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - - double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); - double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - - _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); - _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); - _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); - _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - - if ( sourceLb >= 0 ) - absPhase = ABS_PHASE_POSITIVE; - else if ( sourceUb <= 0 ) - absPhase = ABS_PHASE_NEGATIVE; - - if ( absPhase == PHASE_NOT_FIXED ) - { - // If we got here, we know that lbOfLb < 0 < ubOfUb. In this case, - // we do naive concretization: lb is 0, ub is the max between - // -lb and ub of the input neuron - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] = 0; - _symbolicUb[j * _size + i] = 0; - } - - _symbolicLowerBias[i] = 0; - _symbolicUpperBias[i] = FloatUtils::max( -sourceLb, sourceUb ); - - _symbolicLbOfLb[i] = 0; - _symbolicUbOfLb[i] = _symbolicUpperBias[i]; - _symbolicLbOfUb[i] = 0; - _symbolicUbOfUb[i] = _symbolicUpperBias[i]; - } - else - { - // The phase of this AbsoluteValueConstraint is fixed! 
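        // Worked instance of the naive concretization used above when the phase is
        // not fixed (hypothetical numbers): for y = |x| with source bounds
        // [sourceLb, sourceUb] = [-3, 2],
        //
        //     y >= 0   and   y <= FloatUtils::max( -(-3), 2 ) = 3;
        //
        // both symbolic rows are zeroed and the bounds degenerate to the constants
        // 0 and max( -sourceLb, sourceUb ), exactly as that branch sets them.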
- if ( absPhase == ABS_PHASE_POSITIVE ) - { - // Positive AbsoluteValue, bounds are propagated as is - } - else - { - // Negative AbsoluteValue, bounds are negated and flipped - double temp; - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - temp = _symbolicUb[j * _size + i]; - _symbolicUb[j * _size + i] = -_symbolicLb[j * _size + i]; - _symbolicLb[j * _size + i] = -temp; - } - - temp = _symbolicLowerBias[i]; - _symbolicLowerBias[i] = -_symbolicUpperBias[i]; - _symbolicUpperBias[i] = -temp; - - // Old lb, negated, is the new ub - temp = _symbolicLbOfLb[i]; - _symbolicLbOfLb[i] = -_symbolicUbOfUb[i]; - _symbolicUbOfUb[i] = -temp; - - temp = _symbolicUbOfLb[i]; - _symbolicUbOfLb[i] = -_symbolicLbOfUb[i]; - _symbolicLbOfUb[i] = -temp; - } - } - - // In extreme cases (constraint set externally), _symbolicLbOfLb - // could be negative - so adjust this - if ( _symbolicLbOfLb[i] < 0 ) - _symbolicLbOfLb[i] = 0; - - /* - We now have the tightest bounds we can for the abs - variable. If they are tigheter than what was previously - known, store them. - */ - if ( _lb[i] < _symbolicLbOfLb[i] ) - { - _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } -} - -void Layer::computeSymbolicBoundsForLeakyRelu() -{ - ASSERT( _alpha > 0 && _alpha < 1 ); - - std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); - std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - { - _symbolicLowerBias[i] = _eliminatedNeurons[i]; - _symbolicUpperBias[i] = _eliminatedNeurons[i]; - - _symbolicLbOfLb[i] = _eliminatedNeurons[i]; - _symbolicUbOfLb[i] = _eliminatedNeurons[i]; - _symbolicLbOfUb[i] = _eliminatedNeurons[i]; - _symbolicUbOfUb[i] = _eliminatedNeurons[i]; - } - } - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - /* - There are two ways we can determine that a LeakyReLU has become fixed: - - 1. If the LeakyReLU's variable has been externally fixed - 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) - */ - PhaseStatus leakyReluPhase = PHASE_NOT_FIXED; - - // Has the f variable been eliminated or fixed? 
- if ( FloatUtils::isPositive( _lb[i] ) ) - leakyReluPhase = RELU_PHASE_ACTIVE; - else if ( FloatUtils::isZero( _ub[i] ) ) - leakyReluPhase = RELU_PHASE_INACTIVE; - - ASSERT( _neuronToActivationSources.exists( i ) ); - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - /* - A LeakyReLU initially "inherits" the symbolic bounds computed - for its input variable - */ - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; - _symbolicUb[j * _size + i] = - sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; - } - _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; - _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - - double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); - double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - - _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); - _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); - _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); - _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - - // Has the b variable been fixed? - if ( !FloatUtils::isNegative( sourceLb ) ) - { - leakyReluPhase = RELU_PHASE_ACTIVE; - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - leakyReluPhase = RELU_PHASE_INACTIVE; - } - - if ( leakyReluPhase == PHASE_NOT_FIXED ) - { - // LeakyReLU not fixed - // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) - // Concrete upper bound: x_f <= ub_b - double width = sourceUb - sourceLb; - double coeff = ( sourceUb - _alpha * sourceLb ) / width; - - if ( _alpha <= 1 ) - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicUb[j * _size + i] *= coeff; - } - - // Do the same for the bias, and then adjust - _symbolicUpperBias[i] *= coeff; - _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - - - // For the lower bound, in general, x_f >= lambda * x_b, where - // 0 <= lambda <= 1, would be a sound lower bound. We - // use the heuristic described in section 4.1 of - // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - // to set the value of lambda (either 0 or 1 is considered). 
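        // Illustration of the unfixed-phase upper bound above (hypothetical numbers):
        // with _alpha = 0.1 and source bounds [l, u] = [-2, 2],
        //
        //     width = u - l = 4
        //     coeff = ( u - _alpha * l ) / width = ( 2 + 0.2 ) / 4 = 0.55
        //     bias adjustment = ( _alpha - 1 ) * u * l / width = 0.9
        //
        // yielding y <= 0.55 * x + 0.9, the secant through ( -2, -0.2 ) and ( 2, 2 );
        // the lambda heuristic described above then selects the lower bound's slope.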
- if ( sourceUb > sourceLb ) - { - // lambda = 1 - // Symbolic lower bound: x_f >= x_b - // Concrete lower bound: x_f >= sourceLb - - // Lower bounds are passed as is - } - else - { - // lambda = 1 - // Symbolic lower bound: x_f >= _alpha x_b - // Concrete lower bound: x_f >= 0 - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] *= _alpha; - } - - _symbolicLowerBias[i] *= _alpha; - } - } - else - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] *= coeff; - } - - // Do the same for the bias, and then adjust - _symbolicLowerBias[i] *= coeff; - _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - - if ( sourceUb > sourceLb ) - { - // Upper bounds are passed as is - } - else - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicUb[j * _size + i] *= _alpha; - } - - _symbolicUpperBias[i] *= _alpha; - } - } - - /* - We now have the symbolic representation for the current - layer. Next, we compute new lower and upper bounds for - it. For each of these bounds, we compute an upper bound and - a lower bound. - */ - _symbolicLbOfLb[i] = _symbolicLowerBias[i]; - _symbolicUbOfLb[i] = _symbolicLowerBias[i]; - _symbolicLbOfUb[i] = _symbolicUpperBias[i]; - _symbolicUbOfUb[i] = _symbolicUpperBias[i]; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); - double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); - - double entry = _symbolicLb[j * _size + i]; - - if ( entry >= 0 ) - { - _symbolicLbOfLb[i] += ( entry * inputLb ); - _symbolicUbOfLb[i] += ( entry * inputUb ); - } - else - { - _symbolicLbOfLb[i] += ( entry * inputUb ); - _symbolicUbOfLb[i] += ( entry * inputLb ); - } - - entry = _symbolicUb[j * _size + i]; - - if ( entry >= 0 ) - { - _symbolicLbOfUb[i] += ( entry * inputLb ); - _symbolicUbOfUb[i] += ( entry * inputUb ); - } - else - { - _symbolicLbOfUb[i] += ( entry * inputUb ); - _symbolicUbOfUb[i] += ( entry * inputLb ); - } - } - } - else - { - // The phase of this LeakyReLU is fixed! - if ( leakyReluPhase == RELU_PHASE_ACTIVE ) - { - // Positive LeakyReLU, bounds are propagated as is - } - else - { - // Negative LeakyReLU, bounds are multiplied by _alpha - _symbolicLbOfLb[i] *= _alpha; - _symbolicUbOfLb[i] *= _alpha; - _symbolicLbOfUb[i] *= _alpha; - _symbolicUbOfUb[i] *= _alpha; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicUb[j * _size + i] *= _alpha; - _symbolicLb[j * _size + i] *= _alpha; - } - - _symbolicLowerBias[i] *= _alpha; - _symbolicUpperBias[i] *= _alpha; - } - } - - if ( _symbolicUbOfUb[i] > sourceUb ) - _symbolicUbOfUb[i] = sourceUb; - if ( _symbolicLbOfLb[i] < _alpha * sourceLb ) - _symbolicLbOfLb[i] = _alpha * sourceLb; - - /* - We now have the tightest bounds we can for the leakyRelu - variable. If they are tigheter than what was previously - known, store them. 
- */ - if ( _lb[i] < _symbolicLbOfLb[i] ) - { - _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } -} - - -void Layer::computeSymbolicBoundsForSigmoid() -{ - std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); - std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - { - _symbolicLowerBias[i] = _eliminatedNeurons[i]; - _symbolicUpperBias[i] = _eliminatedNeurons[i]; - - _symbolicLbOfLb[i] = _eliminatedNeurons[i]; - _symbolicUbOfLb[i] = _eliminatedNeurons[i]; - _symbolicLbOfUb[i] = _eliminatedNeurons[i]; - _symbolicUbOfUb[i] = _eliminatedNeurons[i]; - } - } - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - ASSERT( _neuronToActivationSources.exists( i ) ); - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - /* - A Sigmoid initially "inherits" the symbolic bounds computed - for its input variable - */ - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; - _symbolicUb[j * _size + i] = - sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; - } - _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; - _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - - double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); - double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - - _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); - _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); - _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); - _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - - // Bounds of lb, ub are the Sigmoids of source lb, ub - double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); - double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); - - // Case when the Sigmoid constraint is fixed - if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] = 0; - _symbolicUb[j * _size + i] = 0; - } - - _symbolicLbOfUb[i] = sourceUbSigmoid; - _symbolicUbOfUb[i] = sourceUbSigmoid; - _symbolicLbOfLb[i] = sourceLbSigmoid; - _symbolicUbOfLb[i] = sourceLbSigmoid; - - _symbolicUpperBias[i] = sourceUbSigmoid; - _symbolicLowerBias[i] = sourceLbSigmoid; - } - - // Sigmoid not fixed - else - { - double lambda = ( _ub[i] - _lb[i] ) / ( sourceUb - sourceLb ); - double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), - SigmoidConstraint::sigmoidDerivative( sourceUb ) ); - - // update lower bound - if ( FloatUtils::isPositive( sourceLb ) ) - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] *= lambda; - } - - // Do the same for the bias, and then adjust - 
_symbolicLowerBias[i] *= lambda; - _symbolicLowerBias[i] += sourceLbSigmoid - lambda * sourceLb; - } - else - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] *= lambdaPrime; - } - - // Do the same for the bias, and then adjust - _symbolicLowerBias[i] *= lambdaPrime; - _symbolicLowerBias[i] += sourceLbSigmoid - lambdaPrime * sourceLb; - } - - // update upper bound - if ( !FloatUtils::isPositive( sourceUb ) ) - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicUb[j * _size + i] *= lambda; - } - - // Do the same for the bias, and then adjust - _symbolicUpperBias[i] *= lambda; - _symbolicUpperBias[i] += sourceUbSigmoid - lambda * sourceUb; - } - else - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicUb[j * _size + i] *= lambdaPrime; - } - - // Do the same for the bias, and then adjust - _symbolicUpperBias[i] *= lambdaPrime; - _symbolicUpperBias[i] += sourceUbSigmoid - lambdaPrime * sourceUb; - } - - - /* - We now have the symbolic representation for the current - layer. Next, we compute new lower and upper bounds for - it. For each of these bounds, we compute an upper bound and - a lower bound. - */ - _symbolicLbOfLb[i] = _symbolicLowerBias[i]; - _symbolicUbOfLb[i] = _symbolicLowerBias[i]; - _symbolicLbOfUb[i] = _symbolicUpperBias[i]; - _symbolicUbOfUb[i] = _symbolicUpperBias[i]; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); - double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); - - double entry = _symbolicLb[j * _size + i]; - - if ( entry >= 0 ) - { - _symbolicLbOfLb[i] += ( entry * inputLb ); - _symbolicUbOfLb[i] += ( entry * inputUb ); - } - else - { - _symbolicLbOfLb[i] += ( entry * inputUb ); - _symbolicUbOfLb[i] += ( entry * inputLb ); - } - - entry = _symbolicUb[j * _size + i]; - - if ( entry >= 0 ) - { - _symbolicLbOfUb[i] += ( entry * inputLb ); - _symbolicUbOfUb[i] += ( entry * inputUb ); - } - else - { - _symbolicLbOfUb[i] += ( entry * inputUb ); - _symbolicUbOfUb[i] += ( entry * inputLb ); - } - } - } - - if ( _symbolicLbOfLb[i] < -1 ) - _symbolicLbOfLb[i] = -1; - if ( _symbolicUbOfUb[i] > 1 ) - _symbolicUbOfUb[i] = 1; - - /* - We now have the tightest bounds we can for the relu - variable. If they are tigheter than what was previously - known, store them. 
- */ - if ( _lb[i] < _symbolicLbOfLb[i] ) - { - _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } -} - -void Layer::computeSymbolicBoundsForRound() -{ - std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); - std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - { - _symbolicLowerBias[i] = _eliminatedNeurons[i]; - _symbolicUpperBias[i] = _eliminatedNeurons[i]; - - _symbolicLbOfLb[i] = _eliminatedNeurons[i]; - _symbolicUbOfLb[i] = _eliminatedNeurons[i]; - _symbolicLbOfUb[i] = _eliminatedNeurons[i]; - _symbolicUbOfUb[i] = _eliminatedNeurons[i]; - } - } - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - ASSERT( _neuronToActivationSources.exists( i ) ); - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - /* - A Round initially "inherits" the symbolic bounds computed - for its input variable - */ - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; - _symbolicUb[j * _size + i] = - sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; - } - _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; - _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - - double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); - double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - - _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); - _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); - _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); - _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - - - // Bounds of lb, ub are the rounded values of source lb, ub - double sourceUbRound = FloatUtils::round( sourceUb ); - double sourceLbRound = FloatUtils::round( sourceLb ); - - _symbolicLbOfUb[i] = sourceUbRound; - _symbolicUbOfUb[i] = sourceUbRound; - _symbolicLbOfLb[i] = sourceLbRound; - _symbolicUbOfLb[i] = sourceLbRound; - - - // Case when the Round constraint is fixed - if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) - { - _symbolicUb[i] = 0; - _symbolicUpperBias[i] = sourceUbRound; - - _symbolicLb[i] = 0; - _symbolicLowerBias[i] = sourceLbRound; - } - - // Round not fixed - else - { - // Symbolic upper bound: x_f <= x_b + 0.5 - // Concrete upper bound: x_f <= round(ub_b) - - _symbolicUpperBias[i] += 0.5; - - // Symbolic lower bound: x_f >= x_b - 0.5 - // Concrete lower bound: x_f >= round(lb_b) - - _symbolicLowerBias[i] -= 0.5; - } - - /* - We now have the tightest bounds we can for the relu - variable. If they are tigheter than what was previously - known, store them. 
- */ - if ( _lb[i] < _symbolicLbOfLb[i] ) - { - _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } -} - -void Layer::computeSymbolicBoundsForMax() -{ - std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); - std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - { - _symbolicLowerBias[i] = _eliminatedNeurons[i]; - _symbolicUpperBias[i] = _eliminatedNeurons[i]; - - _symbolicLbOfLb[i] = _eliminatedNeurons[i]; - _symbolicUbOfLb[i] = _eliminatedNeurons[i]; - _symbolicLbOfUb[i] = _eliminatedNeurons[i]; - _symbolicUbOfUb[i] = _eliminatedNeurons[i]; - } - } - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - ASSERT( _neuronToActivationSources.exists( i ) ); - List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - - NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); - double maxLowerBound = FloatUtils::negativeInfinity(); - double maxUpperBound = FloatUtils::negativeInfinity(); - - Map sourceLbs; - Map sourceUbs; - for ( const auto &sourceIndex : sources ) - { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - - sourceLbs[sourceIndex] = sourceLb; - sourceUbs[sourceIndex] = sourceUb; - - if ( maxLowerBound < sourceLb ) - { - indexOfMaxLowerBound = sourceIndex; - maxLowerBound = sourceLb; - } - if ( maxUpperBound < sourceUb ) - { - maxUpperBound = sourceUb; - } - } - - // The phase is fixed if the lower-bound of a source variable x_b is - // larger than the upper-bounds of the other source variables. 
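        // Illustration of the phase-fixing criterion above (hypothetical bounds): for
        // y = max( a, b ) with a in [1, 3] and b in [0, 2], indexOfMaxLowerBound is a
        // (maxLowerBound = 1), but ub( b ) = 2 > 1, so the phase is not fixed and the
        // code below falls back to lb_a <= y <= maxUpperBound = 3. Were b in [0, 0.5]
        // instead, no other upper bound would exceed 1, the phase would be fixed, and
        // y would simply inherit a's symbolic bounds.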
- bool phaseFixed = true; - for ( const auto &sourceIndex : sources ) - { - if ( sourceIndex != indexOfMaxLowerBound && - FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) ) - { - phaseFixed = false; - break; - } - } - - if ( phaseFixed ) - { - // Phase fixed - // Symbolic bound: x_b <= x_f <= x_b - // Concrete bound: lb_b <= x_f <= ub_b - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; - _symbolicUb[j * _size + i] = - sourceSymbolicUb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; - } - _symbolicLowerBias[i] = - sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron]; - _symbolicUpperBias[i] = - sourceLayer->getSymbolicUpperBias()[indexOfMaxLowerBound._neuron]; - - - _symbolicLbOfLb[i] = maxLowerBound; - _symbolicUbOfLb[i] = maxLowerBound; - _symbolicLbOfUb[i] = sourceUbs[indexOfMaxLowerBound]; - _symbolicUbOfUb[i] = sourceUbs[indexOfMaxLowerBound]; - } - else - { - // MaxPool not fixed - // Symbolic bounds: x_b <= x_f <= maxUpperBound - // Concrete bounds: lb_b <= x_f <= maxUpperBound - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; - _symbolicUb[j * _size + i] = 0; - } - _symbolicLowerBias[i] = - sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron]; - _symbolicUpperBias[i] = maxUpperBound; - - _symbolicLbOfLb[i] = maxLowerBound; - _symbolicUbOfLb[i] = maxLowerBound; - _symbolicLbOfUb[i] = maxUpperBound; - _symbolicUbOfUb[i] = maxUpperBound; - } - - /* - We now have the tightest bounds we can for the relu - variable. If they are tigheter than what was previously - known, store them. - */ - if ( _lb[i] < _symbolicLbOfLb[i] ) - { - _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } -} - -void Layer::computeSymbolicBoundsForSoftmax() -{ - std::fill_n( _symbolicLowerBias, _size, 0 ); - std::fill_n( _symbolicUpperBias, _size, 0 ); - - double *symbolicLb = new double[_size * _size]; - double *symbolicUb = new double[_size * _size]; - std::fill_n( symbolicLb, _size * _size, 0 ); - std::fill_n( symbolicUb, _size * _size, 0 ); - - double *_work = new double[_size * _size]; - std::fill_n( _work, _size * _size, 0 ); - - Set handledInputNeurons; - unsigned sourceLayerSize = _size; - SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - { - _symbolicLowerBias[i] = _eliminatedNeurons[i]; - _symbolicUpperBias[i] = _eliminatedNeurons[i]; - - _symbolicLbOfLb[i] = _eliminatedNeurons[i]; - _symbolicUbOfLb[i] = _eliminatedNeurons[i]; - _symbolicLbOfUb[i] = _eliminatedNeurons[i]; - _symbolicUbOfUb[i] = _eliminatedNeurons[i]; - } - } - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - ASSERT( _neuronToActivationSources.exists( i ) ); - List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - sourceLayerSize = sourceLayer->getSize(); - ASSERT( sourceLayerSize == _size ); - - Vector sourceLbs; - Vector sourceUbs; - Vector sourceMids; - Vector targetLbs; - Vector targetUbs; - unsigned len 
= 0; - for ( const auto &sourceIndex : sources ) - { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceMids.append( ( sourceLb + sourceUb ) / 2 ); - targetLbs.append( _lb[i] ); - targetUbs.append( _ub[i] ); - - ++len; - } - - // Find the index of i in the softmax - unsigned index = 0; - for ( const auto &sourceIndex : sources ) - { - if ( handledInputNeurons.exists( sourceIndex._neuron ) ) - ++index; - else - { - handledInputNeurons.insert( sourceIndex._neuron ); - break; - } - } - - double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); - double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); - if ( _lb[i] < lb ) - { - _lb[i] = lb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > ub ) - { - _ub[i] = ub; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - targetLbs[index] = _lb[i]; - targetUbs[index] = _ub[i]; - - if ( FloatUtils::areEqual( _lb[i], _ub[i] ) ) - { - _symbolicLowerBias[i] = _lb[i]; - _symbolicUpperBias[i] = _ub[i]; - for ( const auto &sourceIndex : sources ) - { - symbolicLb[len * sourceIndex._neuron + i] = 0; - symbolicUb[len * sourceIndex._neuron + i] = 0; - } - } - else - { - // Compute symbolic bound - if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) - { - bool useLSE2 = false; - for ( const auto &lb : targetLbs ) - { - if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) - useLSE2 = true; - } - unsigned inputIndex = 0; - if ( !useLSE2 ) - { - _symbolicLowerBias[i] = - softmaxLSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); - for ( const auto &sourceIndex : sources ) - { - double dldj = softmaxdLSELowerBound( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; - _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - } - else - { - _symbolicLowerBias[i] = - softmaxLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); - for ( const auto &sourceIndex : sources ) - { - double dldj = softmaxdLSELowerBound2( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; - _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - } - - _symbolicUpperBias[i] = - softmaxLSEUpperBound( sourceMids, targetLbs, targetUbs, index ); - inputIndex = 0; - for ( const auto &sourceIndex : sources ) - { - double dudj = softmaxdLSEUpperbound( - sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[len * sourceIndex._neuron + i] = dudj; - _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; - ++inputIndex; - } - } - else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) - { - _symbolicLowerBias[i] = - softmaxERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); - unsigned inputIndex = 0; - for ( const auto &sourceIndex : sources ) - { - double dldj = - softmaxdERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; - _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - - _symbolicUpperBias[i] = - softmaxERUpperBound( sourceMids, targetLbs, targetUbs, 
index ); - inputIndex = 0; - for ( const auto &sourceIndex : sources ) - { - double dudj = - softmaxdERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[len * sourceIndex._neuron + i] = dudj; - _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; - ++inputIndex; - } - } - } - } - - for ( const auto &sourceLayerEntry : _sourceLayers ) - { - const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); - - /* - Perform the multiplication - - newUB = oldUB * posWeights + oldLB * negWeights - newLB = oldUB * negWeights + oldLB * posWeights - */ - - for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) - { - if ( symbolicLb[i] > 0 ) - _work[i] = symbolicLb[i]; - else - _work[i] = 0; - } - // _work is now positive weights in symbolicLb - matrixMultiplication( sourceLayer->getSymbolicLb(), - _work, - _symbolicLb, - _inputLayerSize, - sourceLayerSize, - _size ); - if ( sourceLayer->getSymbolicLowerBias() ) - matrixMultiplication( sourceLayer->getSymbolicLowerBias(), - _work, - _symbolicLowerBias, - 1, - sourceLayerSize, - _size ); - - for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) - { - if ( symbolicLb[i] < 0 ) - _work[i] = symbolicLb[i]; - else - _work[i] = 0; - } - // _work is now negative weights in symbolicLb - matrixMultiplication( sourceLayer->getSymbolicUb(), - _work, - _symbolicLb, - _inputLayerSize, - sourceLayerSize, - _size ); - if ( sourceLayer->getSymbolicLowerBias() ) - matrixMultiplication( sourceLayer->getSymbolicUpperBias(), - _work, - _symbolicLowerBias, - 1, - sourceLayerSize, - _size ); - - for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) - { - if ( symbolicUb[i] > 0 ) - _work[i] = symbolicUb[i]; - else - _work[i] = 0; - } - // _work is now positive weights in symbolicUb - matrixMultiplication( sourceLayer->getSymbolicUb(), - _work, - _symbolicUb, - _inputLayerSize, - sourceLayerSize, - _size ); - if ( sourceLayer->getSymbolicUpperBias() ) - matrixMultiplication( sourceLayer->getSymbolicUpperBias(), - _work, - _symbolicUpperBias, - 1, - sourceLayerSize, - _size ); - - for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) - { - if ( symbolicUb[i] < 0 ) - _work[i] = symbolicUb[i]; - else - _work[i] = 0; - } - // _work is now negative weights in symbolicUb - matrixMultiplication( sourceLayer->getSymbolicLb(), - _work, - _symbolicUb, - _inputLayerSize, - sourceLayerSize, - _size ); - if ( sourceLayer->getSymbolicUpperBias() ) - matrixMultiplication( sourceLayer->getSymbolicLowerBias(), - _work, - _symbolicUpperBias, - 1, - sourceLayerSize, - _size ); - } - - /* - We now have the symbolic representation for the current - layer. Next, we compute new lower and upper bounds for - it. For each of these bounds, we compute an upper bound and - a lower bound. 
- */ - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - _symbolicLbOfLb[i] = _symbolicLowerBias[i]; - _symbolicUbOfLb[i] = _symbolicLowerBias[i]; - _symbolicLbOfUb[i] = _symbolicUpperBias[i]; - _symbolicUbOfUb[i] = _symbolicUpperBias[i]; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); - double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); - - double entry = _symbolicLb[j * _size + i]; - - if ( entry >= 0 ) - { - _symbolicLbOfLb[i] += ( entry * inputLb ); - _symbolicUbOfLb[i] += ( entry * inputUb ); - } - else - { - _symbolicLbOfLb[i] += ( entry * inputUb ); - _symbolicUbOfLb[i] += ( entry * inputLb ); - } - - entry = _symbolicUb[j * _size + i]; - - if ( entry >= 0 ) - { - _symbolicLbOfUb[i] += ( entry * inputLb ); - _symbolicUbOfUb[i] += ( entry * inputUb ); - } - else - { - _symbolicLbOfUb[i] += ( entry * inputUb ); - _symbolicUbOfUb[i] += ( entry * inputLb ); - } - } - } - - if ( symbolicLb ) - { - delete[] symbolicLb; - symbolicLb = NULL; - } - if ( symbolicUb ) - { - delete[] symbolicUb; - symbolicUb = NULL; - } - if ( _work ) - { - delete[] _work; - _work = NULL; - } -} - -void Layer::computeSymbolicBoundsForBilinear() -{ - std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); - std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - { - _symbolicLowerBias[i] = _eliminatedNeurons[i]; - _symbolicUpperBias[i] = _eliminatedNeurons[i]; - - _symbolicLbOfLb[i] = _eliminatedNeurons[i]; - _symbolicUbOfLb[i] = _eliminatedNeurons[i]; - _symbolicLbOfUb[i] = _eliminatedNeurons[i]; - _symbolicUbOfUb[i] = _eliminatedNeurons[i]; - } - } + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) continue; + } - ASSERT( _neuronToActivationSources.exists( i ) ); - List sources = getActivationSources( i ); - ASSERT( sources.size() == 2 ); + PhaseStatus absPhase = PHASE_NOT_FIXED; - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceLayerSize = sourceLayer->getSize(); const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - Vector sourceLbs; - Vector sourceUbs; - Vector sourceValues; - bool allConstant = true; - unsigned indexA = 0; - unsigned indexB = 0; - unsigned counter = 0; - for ( const auto &sourceIndex : sources ) + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } - sourceLbs.append( sourceLb ); - sourceUbs.append( sourceUb ); + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) - { - allConstant = false; - } - else - { - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - 
sourceValues.append( sourceValue ); - } + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - if ( counter == 0 ) - { - indexA = sourceIndex._neuron; - } - else - { - indexB = sourceIndex._neuron; - } - ++counter; - } + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + if ( sourceLb >= 0 ) + absPhase = ABS_PHASE_POSITIVE; + else if ( sourceUb <= 0 ) + absPhase = ABS_PHASE_NEGATIVE; - if ( allConstant ) + if ( absPhase == PHASE_NOT_FIXED ) { - // If the both source neurons have been eliminated, this neuron is constant + // If we got here, we know that lbOfLb < 0 < ubOfUb. In this case, + // we do naive concretization: lb is 0, ub is the max between + // -lb and ub of the input neuron for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicUb[j * _size + i] = 0; _symbolicLb[j * _size + i] = 0; + _symbolicUb[j * _size + i] = 0; } - _symbolicUpperBias[i] = sourceValues[0] * sourceValues[1]; - _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1]; - continue; - } + _symbolicLowerBias[i] = 0; + _symbolicUpperBias[i] = FloatUtils::max( -sourceLb, sourceUb ); - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicUb[j * _size + i] = 0; - _symbolicLb[j * _size + i] = 0; + _symbolicLbOfLb[i] = 0; + _symbolicUbOfLb[i] = _symbolicUpperBias[i]; + _symbolicLbOfUb[i] = 0; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; } - - // Symbolic lower bound: - // out >= alpha * x + beta * y + gamma - // where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y - - // Symbolic upper bound: - // out <= alpha * x + beta * y + gamma - // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y - for ( unsigned j = 0; j < _inputLayerSize; ++j ) + else { - if ( sourceLbs[1] >= 0 ) - { - _symbolicLb[j * _size + i] += - sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; - } - else - { - _symbolicLb[j * _size + i] += - sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; - } - - if ( sourceUbs[1] >= 0 ) - { - _symbolicUb[j * _size + i] += - sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; - } - else - { - _symbolicLb[j * _size + i] += - sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; - } - - if ( sourceLbs[0] >= 0 ) + // The phase of this AbsoluteValueConstraint is fixed! 
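            // Sketch of the negated-and-flipped propagation in the fixed-negative
            // branch below, assuming (hypothetically) a source b with symbolic bounds
            // 2x + 1 <= b <= 2x + 3 and b <= 0 on its whole input range. Then
            // |b| = -b, so
            //
            //     -( 2x + 3 ) <= |b| <= -( 2x + 1 );
            //
            // the old upper bound, negated, becomes the new lower bound and vice
            // versa, which is what the temp-swaps over coefficients, biases, and the
            // four concrete estimates implement.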
+            if ( absPhase == ABS_PHASE_POSITIVE )
             {
-                _symbolicLb[j * _size + i] +=
-                    sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB];
-                _symbolicUb[j * _size + i] +=
-                    sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB];
+                // Positive AbsoluteValue, bounds are propagated as is
             }
             else
             {
-                _symbolicLb[j * _size + i] +=
-                    sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB];
-                _symbolicUb[j * _size + i] +=
-                    sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB];
-            }
-        }
-        _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1];
-        _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1];
-
-        double lb = FloatUtils::infinity();
-        double ub = FloatUtils::negativeInfinity();
-        List<double> values = { sourceLbs[0] * sourceLbs[1],
-                                sourceLbs[0] * sourceUbs[1],
-                                sourceUbs[0] * sourceLbs[1],
-                                sourceUbs[0] * sourceUbs[1] };
-        for ( const auto &v : values )
-        {
-            if ( v < lb )
-                lb = v;
-            if ( v > ub )
-                ub = v;
-        }
-
-        /*
-          We now have the symbolic representation for the current
-          layer. Next, we compute new lower and upper bounds for
-          it. For each of these bounds, we compute an upper bound and
-          a lower bound.
-        */
-        _symbolicLbOfLb[i] = _symbolicLowerBias[i];
-        _symbolicUbOfLb[i] = _symbolicLowerBias[i];
-        _symbolicLbOfUb[i] = _symbolicUpperBias[i];
-        _symbolicUbOfUb[i] = _symbolicUpperBias[i];
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
-            double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
-
-            double entry = _symbolicLb[j * _size + i];
+                // Negative AbsoluteValue, bounds are negated and flipped
+                double temp;
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    temp = _symbolicUb[j * _size + i];
+                    _symbolicUb[j * _size + i] = -_symbolicLb[j * _size + i];
+                    _symbolicLb[j * _size + i] = -temp;
+                }

-            if ( entry >= 0 )
-            {
-                _symbolicLbOfLb[i] += ( entry * inputLb );
-                _symbolicUbOfLb[i] += ( entry * inputUb );
-            }
-            else
-            {
-                _symbolicLbOfLb[i] += ( entry * inputUb );
-                _symbolicUbOfLb[i] += ( entry * inputLb );
-            }
+                temp = _symbolicLowerBias[i];
+                _symbolicLowerBias[i] = -_symbolicUpperBias[i];
+                _symbolicUpperBias[i] = -temp;

-            entry = _symbolicUb[j * _size + i];
+                // Old lb, negated, is the new ub
+                temp = _symbolicLbOfLb[i];
+                _symbolicLbOfLb[i] = -_symbolicUbOfUb[i];
+                _symbolicUbOfUb[i] = -temp;

-            if ( entry >= 0 )
-            {
-                _symbolicLbOfUb[i] += ( entry * inputLb );
-                _symbolicUbOfUb[i] += ( entry * inputUb );
-            }
-            else
-            {
-                _symbolicLbOfUb[i] += ( entry * inputUb );
-                _symbolicUbOfUb[i] += ( entry * inputLb );
+                temp = _symbolicUbOfLb[i];
+                _symbolicUbOfLb[i] = -_symbolicLbOfUb[i];
+                _symbolicLbOfUb[i] = -temp;
             }
         }

+        // In extreme cases (constraint set externally), _symbolicLbOfLb
+        // could be negative - so adjust this
+        if ( _symbolicLbOfLb[i] < 0 )
+            _symbolicLbOfLb[i] = 0;
+
         /*
-          We now have the tightest bounds we can for the relu
+          We now have the tightest bounds we can for the abs
           variable. If they are tighter than what was previously
           known, store them.
*/ @@ -3397,317 +1623,6 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } -double Layer::softmaxLSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - double sum = 0; - for ( unsigned j = 0; j < inputs.size(); ++j ) - { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputs[j]; - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); - } - - return std::exp( inputs[i] ) / sum; -} - -double Layer::softmaxdLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double val = 0; - if ( i == di ) - val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); - - double ldi = inputLbs[di]; - double udi = inputUbs[di]; - - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputMids[j]; - - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); - } - - val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / - ( udi - ldi ); - - return val; -} - -double Layer::softmaxLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) - { - max = mid; - maxInputIndex = index; - } - ++index; - } - - if ( maxInputIndex == i ) - return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); - else - { - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; - - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } - - return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; - } -} - -double Layer::softmaxdLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) - { - max = mid; - maxInputIndex = index; - } - ++index; - } - - if ( maxInputIndex == i ) - return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); - else - { - double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); - - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } - double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] ) / ( sum * sum ); - - if ( i == di ) - { - double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; - double udijstar = inputUbs[i] - inputLbs[maxInputIndex]; - return val - - val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar 
); - } - else if ( maxInputIndex == di ) - { - double sum2 = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - continue; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); - } - } - return -val + val2 * sum2; - } - else - { - double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; - double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; - return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / - ( udijstar - ldijstar ); - } - } -} - -double Layer::softmaxLSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); - - return ( ( li * std::log( ui ) - ui * std::log( li ) ) / ( std::log( ui ) - std::log( li ) ) - - ( ui - li ) / ( std::log( ui ) - std::log( li ) ) * - SoftmaxConstraint::logSumOfExponential( inputTilda ) ); -} - -double Layer::softmaxdLSEUpperbound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - double val = -( ui - li ) / ( std::log( ui ) - std::log( li ) ); - - double val2 = std::exp( inputMids[di] ) / SoftmaxConstraint::sumOfExponential( inputMids ); - if ( i == di ) - val2 -= 1; - - return val * val2; -} - -double Layer::softmaxERLowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); - - double sum = 0; - for ( unsigned j = 0; j < inputs.size(); ++j ) - { - if ( i == j ) - sum += 1; - else - { - double ljTilda = inputLbs[j] - inputUbs[i]; - double ujTilda = inputUbs[j] - inputLbs[i]; - double xjTilda = inputTilda[j]; - - sum += ( ujTilda - xjTilda ) / ( ujTilda - ljTilda ) * std::exp( ljTilda ) + - ( xjTilda - ljTilda ) / ( ujTilda - ljTilda ) * std::exp( ujTilda ); - } - } - - return 1 / sum; -} - -double Layer::softmaxdERLowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double val = softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); - - if ( i != di ) - { - double ldiTilda = inputLbs[di] - inputUbs[i]; - double udiTilda = inputUbs[di] - inputLbs[i]; - return -val * val * ( std::exp( udiTilda ) - std::exp( ldiTilda ) ) / - ( udiTilda - ldiTilda ); - } - else - { - double val2 = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j != i ) - { - double ljTilda = inputLbs[j] - inputUbs[i]; - double ujTilda = inputUbs[j] - inputLbs[i]; - val2 += ( std::exp( ujTilda ) - std::exp( ljTilda ) ) / ( ujTilda - ljTilda ); - } - } - return val * val * val2; - } -} - -double Layer::softmaxERUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); - - return ui + li - ui * li * SoftmaxConstraint::sumOfExponential( inputTilda ); -} - -double Layer::softmaxdERUpperBound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - - if ( i == di ) - { - double val2 = -1; 
-        for ( unsigned j = 0; j < inputMids.size(); ++j )
-            val2 += std::exp( inputMids[j] - inputMids[i] );
-        return li * ui * val2;
-    }
-    else
-        return -li * ui * std::exp( inputMids[di] - inputMids[i] );
-}
-
-double Layer::softmaxLinearLowerBound( const Vector<double> &inputLbs,
-                                       const Vector<double> &inputUbs,
-                                       unsigned i )
-{
-    Vector<double> uTilda;
-    SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda );
-    uTilda[i] = 0;
-    return 1 / SoftmaxConstraint::sumOfExponential( uTilda );
-}
-
-double Layer::softmaxLinearUpperBound( const Vector<double> &inputLbs,
-                                       const Vector<double> &inputUbs,
-                                       unsigned i )
-{
-    Vector<double> lTilda;
-    SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda );
-    lTilda[i] = 0;
-    return 1 / SoftmaxConstraint::sumOfExponential( lTilda );
-}
-
 void Layer::eliminateVariable( unsigned variable, double value )
 {
     if ( !_variableToNeuron.exists( variable ) )
diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h
index 900276eda..e52ea0103 100644
--- a/src/nlr/Layer.h
+++ b/src/nlr/Layer.h
@@ -2,7 +2,7 @@
 /*! \file Layer.h
  ** \verbatim
  ** Top contributors (to current version):
- **   Guy Katz, Ido Shmuel
+ **   Guy Katz
  ** This file is part of the Marabou project.
  ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
  ** in the top-level source directory) and their institutional affiliations.
@@ -208,88 +208,14 @@ class Layer
     void allocateMemory();
     void freeMemoryIfNeeded();

-    /*
-      The following methods compute concrete softmax output bounds
-      using different linear approximation, as well as the coefficients
-      of softmax inputs in the symbolic bounds
-    */
-    double softmaxLSELowerBound( const Vector<double> &inputs,
-                                 const Vector<double> &inputLbs,
-                                 const Vector<double> &inputUbs,
-                                 unsigned i );
-
-    double softmaxdLSELowerBound( const Vector<double> &inputMids,
-                                  const Vector<double> &inputLbs,
-                                  const Vector<double> &inputUbs,
-                                  unsigned i,
-                                  unsigned di );
-
-    double softmaxLSELowerBound2( const Vector<double> &inputMids,
-                                  const Vector<double> &inputLbs,
-                                  const Vector<double> &inputUbs,
-                                  unsigned i );
-
-    double softmaxdLSELowerBound2( const Vector<double> &inputMids,
-                                   const Vector<double> &inputLbs,
-                                   const Vector<double> &inputUbs,
-                                   unsigned i,
-                                   unsigned di );
-
-    double softmaxLSEUpperBound( const Vector<double> &inputs,
-                                 const Vector<double> &outputLb,
-                                 const Vector<double> &outputUb,
-                                 unsigned i );
-
-    double softmaxdLSEUpperbound( const Vector<double> &inputMids,
-                                  const Vector<double> &outputLb,
-                                  const Vector<double> &outputUb,
-                                  unsigned i,
-                                  unsigned di );
-
-    double softmaxERLowerBound( const Vector<double> &inputs,
-                                const Vector<double> &inputLbs,
-                                const Vector<double> &inputUbs,
-                                unsigned i );
-
-    double softmaxdERLowerBound( const Vector<double> &inputMids,
-                                 const Vector<double> &inputLbs,
-                                 const Vector<double> &inputUbs,
-                                 unsigned i,
-                                 unsigned di );
-
-    double softmaxERUpperBound( const Vector<double> &inputs,
-                                const Vector<double> &outputLb,
-                                const Vector<double> &outputUb,
-                                unsigned i );
-
-    double softmaxdERUpperBound( const Vector<double> &inputMids,
-                                 const Vector<double> &outputLb,
-                                 const Vector<double> &outputUb,
-                                 unsigned i,
-                                 unsigned di );
-
-    double softmaxLinearLowerBound( const Vector<double> &inputLbs,
-                                    const Vector<double> &inputUbs,
-                                    unsigned i );
-
-    double softmaxLinearUpperBound( const Vector<double> &inputLbs,
-                                    const Vector<double> &inputUbs,
-                                    unsigned i );
-
     /*
       Helper functions for symbolic bound tightening
     */
     void computeSymbolicBoundsForInput();
     void computeSymbolicBoundsForRelu();
     void computeSymbolicBoundsForSign();
     void computeSymbolicBoundsForAbsoluteValue();
     void computeSymbolicBoundsForWeightedSum();
-    void computeSymbolicBoundsForMax();
-    void computeSymbolicBoundsForLeakyRelu();
-    void computeSymbolicBoundsForSigmoid();
-    void 
computeSymbolicBoundsForRound(); - void computeSymbolicBoundsForSoftmax(); - void computeSymbolicBoundsForBilinear(); void computeSymbolicBoundsDefault(); /* @@ -299,12 +225,6 @@ class Layer void computeIntervalArithmeticBoundsForRelu(); void computeIntervalArithmeticBoundsForAbs(); void computeIntervalArithmeticBoundsForSign(); - void computeIntervalArithmeticBoundsForMax(); - void computeIntervalArithmeticBoundsForLeakyRelu(); - void computeIntervalArithmeticBoundsForSigmoid(); - void computeIntervalArithmeticBoundsForRound(); - void computeIntervalArithmeticBoundsForSoftmax(); - void computeIntervalArithmeticBoundsForBilinear(); const double *getSymbolicLb() const; const double *getSymbolicUb() const; diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 35024b45c..33b0aff13 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -14,7 +14,6 @@ **/ #include "../../engine/tests/MockTableau.h" // TODO: fix this -#include "DeepPolySoftmaxElement.h" #include "FloatUtils.h" #include "Layer.h" #include "NetworkLevelReasoner.h" @@ -189,243 +188,161 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) + void test_evaluate_relus() { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + NLR::NetworkLevelReasoner nlr; - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); + populateNetwork( nlr ); - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + double input[2]; + double output[2]; - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( 
NLR::NeuronIndex( 2, 2 ), 7 ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + // With ReLUs, case 2 + input[0] = 1; + input[1] = 2; - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); } - void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) + void test_evaluate_sigmoids() { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + NLR::NetworkLevelReasoner nlr; - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); + populateNetworkWithSigmoids( nlr ); - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + double input[2]; + double output[2]; - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + // case 1 + input[0] = 0; + input[1] = 0; - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + // case 2 + input[0] = 1; + input[1] = 1; - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + // case 3 + input[0] = 1; + input[1] = 2; - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + TS_ASSERT( 
FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); } - void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) + void test_evaluate_non_consecutive_layers() { - /* - a - x d f - b - y e g - c - */ + NLR::NetworkLevelReasoner nlr; // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); - // Set the weights and biases for the weighted sum layers + // Set the weights and relus nlr.setWeight( 0, 0, 1, 0, 1 ); nlr.setWeight( 0, 0, 1, 1, 2 ); nlr.setWeight( 0, 1, 1, 1, -3 ); nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Round sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + // Evaluate + double input[2]; + double output; - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + input[0] = 1; + input[1] = 1; - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + input[0] = -1; + input[1] = 2; - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + + TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); + 
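// (Editor's note: these successor sets simply mirror the addLayerDependency calls made when the network was built, including the input layer's skip connections to layers 3 and 4.) + 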
TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); } - void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) + void test_evaluate_relus_and_abs() { - /* - a - x d f - b - y e g - c - */ + NLR::NetworkLevelReasoner nlr; // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - // Mark layer dependencies for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); @@ -441,7 +358,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 2, 1, 3, 0, 1 ); nlr.setWeight( 2, 1, 3, 1, 1 ); nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); nlr.setWeight( 4, 0, 5, 0, 1 ); nlr.setWeight( 4, 0, 5, 1, 1 ); @@ -450,7 +367,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the LeakyReLU sources + // Mark the ReLU/Abs sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); @@ -458,335 +375,257 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + double input[2]; + double output[2]; - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + input[0] = 1; + input[1] = 1; - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + input[0] = 1; + input[1] = 2; - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } - void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) + void test_store_into_other() { - /* - a - x b e - g - y c f - d - */ + NLR::NetworkLevelReasoner nlr; - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::MAX, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + populateNetwork( nlr ); - // Mark layer dependencies 
- for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); + NLR::NetworkLevelReasoner nlr2; - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -2 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 2 ); - nlr.setWeight( 0, 1, 1, 3, -3 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); + double input[2]; + double output1[2]; + double output2[2]; - nlr.setWeight( 4, 0, 5, 0, -1 ); + // Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 0, 2 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Mark the Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) + void test_store_into_other_with_sigmoids() { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SOFTMAX, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); 
- nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); + NLR::NetworkLevelReasoner nlr; - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + populateNetworkWithSigmoids( nlr ); - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + NLR::NetworkLevelReasoner nlr2; - // Mark the Softmax sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + double input[2]; + double output1[2]; + double output2[2]; - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + // case 1 + input[0] = 0; + input[1] = 0; - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + // case 2 + input[0] = 1; + input[1] = 1; - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) + void test_interval_arithmetic_bound_propagation_relu_constraints() { - /* - a - x b e - g - y c f - d - */ + NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::BILINEAR, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -2 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 1, 
-3 ); - nlr.setWeight( 0, 1, 1, 2, 2 ); - nlr.setWeight( 0, 1, 1, 3, -3 ); + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setTableau( &tableau ); - nlr.setWeight( 4, 0, 5, 0, -1 ); + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 0, 2 ); + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - // Mark the Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( 
bounds.exists( bound ) ); - void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - // Mark the Round/Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); + for ( const auto &bound : expectedBounds2 ) + TS_ASSERT( bounds.exists( bound ) ); } - void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) + void test_interval_arithmetic_bound_propagation_abs_constraints() { - /* - a - x d f - b - y e g - c - */ + NLR::NetworkLevelReasoner nlr; // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); // Mark layer dependencies @@ -813,7 +652,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the Round/Sign sources + // Mark the Abs sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); @@ -821,6 +660,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); + // Layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 4, 5 ); + // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); @@ -841,225 +687,140 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - nlr.getLayer( 2 )->setAlpha( 0.1 ); + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + 
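// (Editor's note: the remaining variables get the same very loose initial range, so every tightening asserted below is derived by the propagation itself.) + 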
tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); + nlr.setTableau( &tableau ); - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), - // Mark the LeakyReLU/Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), + } ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 
3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d - b f - y e - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - 
nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d - b f - y e - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); + for ( const auto &bound : expectedBounds2 ) + TS_ASSERT( bounds.exists( bound ) ); } - void populateNetworkSBTRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + void populateNetworkSBT( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* 2 R 1 @@ 
-1124,307 +885,332 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 6, large ); } - void populateNetworkSBTReluResidual1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + void test_sbt_relus_all_active() { - /* - -1 - __________________ - / \ - / 1 R -1 1 R 3 1 - x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 - \ / - \ 3 / - \________________________/ + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - */ + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 2, NLR::Layer::RELU, 1 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 4, NLR::Layer::RELU, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 1, 5 ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 4, 0, 5, 0, 3 ); - nlr.setWeight( 0, 0, 3, 0, -1 ); - nlr.setWeight( 1, 0, 5, 0, 3 ); + /* + Input ranges: + x0: [4, 6] + x1: [1, 5] - nlr.setBias( 3, 0, 1 ); - nlr.setBias( 5, 0, 1 ); + Layer 1: - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] - nlr.addActivationSource( 3, 0, 4, 0 ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + Both ReLUs active, bounds survive the activations: - // Very loose bounds for neurons except inputs - double large = 1000000; + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] - tableau.getBoundManager().initialize( 6 ); - tableau.setLowerBound( 1, -large ); - tableau.setUpperBound( 1, large ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - } + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] - void populateNetworkSBTReluResidual2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - -1 - __________________ - / \ - / 1 R -1 1 R 3 1 1 - x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 --- x6 - \ / - \ 1 / - \_______________________________/ + Layer 2: + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] */ - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 2, NLR::Layer::RELU, 1 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 4, NLR::Layer::RELU, 1 ); - nlr.addLayer( 5, 
NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 6, NLR::Layer::WEIGHTED_SUM, 1 ); + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - // Mark layer dependencies - for ( unsigned i = 1; i <= 6; ++i ) - nlr.addLayerDependency( i - 1, i ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 0, 5 ); + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 4, 0, 5, 0, 3 ); - nlr.setWeight( 0, 0, 3, 0, -1 ); - nlr.setWeight( 0, 0, 5, 0, 1 ); - nlr.setWeight( 5, 0, 6, 0, 1 ); + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); - nlr.setBias( 3, 0, 1 ); - nlr.setBias( 5, 0, 1 ); + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); + } - nlr.addActivationSource( 3, 0, 4, 0 ); + void test_sbt_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 6 ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); - // Very loose bounds for neurons except inputs - double large = 1000000; + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - tableau.getBoundManager().initialize( 7 ); - tableau.setLowerBound( 1, -large ); - tableau.setUpperBound( 1, large ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - } + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - void populateNetworkSBTReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { /* + Input ranges: - 1 1 1 1 - x0 --- x2 x5 --- x6 x9 --- x10 - \ /\ /\ / \ / \ / - 1 \ / R\ /-1\ / R \ / 1 \ / - \/ \/ \/ \/ \/ - /\ /\ /\ /\ /\ - 1 / \ R/ \ 1/ \ R / \ 1 / \ - / \/ \/ \ / \ / 0 \ - x1 --- x3 x4 --- x7 x8 --- x11 - -1 1 - - The example described in Fig. 
3 of - https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - */ + x0: [4, 6] + x1: [1, 5] - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + Layer 1: - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, -1 ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds survive the activation - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 0 ); + x4.lb = 0 + x4.ub = 0 - nlr.setBias( 5, 0, 1 ); + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 0 ); + Layer 2: - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 0 ); + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 8 ); + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + void test_sbt_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - // Very loose bounds for neurons except inputs - double large = 1000000; + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - 
tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, so its bounds are concretized. + Coefficient: 12/(12-(-4)) = 12/16 = 0.75 + Second ReLU is active, bounds survive the activation + + x4 range: [0, 12] + x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = 0.5x0 + 1.25x1 - 11.25 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); } - void populateNetworkSBTLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + void test_sbt_relus_active_and_externally_fixed() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should leave the node's phase unfixed. 
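+ // (Editor's note: with that bias, x2 = 2x0 + 3x1 - 15 spans [-4, 12] for these inputs, the same undecided case worked through in the previous test; the variable elimination below is what fixes the phase.) 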
+ nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds survive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: - 1 LR 1 LR 1 1 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 1 \ / 0 \ / - \/ \/ \/ - /\ /\ /\ - 1 / \ 1 / \ 1 / \ - / \ LR / \ LR / 1 \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - -1 -1 - - The example described in Fig. 3 of - https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - using LeakyReLU activation instead of ReLU + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] */ + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_abs_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.2 ); - nlr.getLayer( 4 )->setAlpha( 0.2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) + for ( unsigned i = 1; i <= 3; ++i ) nlr.addLayerDependency( i - 1, i ); - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, -1 ); - + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, 1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 0 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - - nlr.setBias( 5, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); - // Mark the LeakyReLU sources + // Mark the Abs sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); 
+
+ void test_sbt_abs_all_positive()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 7 );
+ nlr.setTableau( &tableau );
+
 // Create the layers
 nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
 nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 );
- nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 );
- nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
-
- nlr.getLayer( 2 )->setAlpha( 0.2 );
- nlr.getLayer( 4 )->setAlpha( 0.2 );
+ nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );

 // Mark layer dependencies
- for ( unsigned i = 1; i <= 5; ++i )
+ for ( unsigned i = 1; i <= 3; ++i )
 nlr.addLayerDependency( i - 1, i );

- // Set the weights and biases for the weighted sum layers
- nlr.setWeight( 0, 0, 1, 0, 1 );
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
 nlr.setWeight( 0, 0, 1, 1, 1 );
- nlr.setWeight( 0, 1, 1, 0, 1 );
- nlr.setWeight( 0, 1, 1, 1, -1 );
-
+ nlr.setWeight( 0, 1, 1, 0, 3 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
 nlr.setWeight( 2, 0, 3, 0, 1 );
- nlr.setWeight( 2, 0, 3, 1, 1 );
- nlr.setWeight( 2, 1, 3, 0, 1 );
- nlr.setWeight( 2, 1, 3, 1, -1 );
-
- nlr.setWeight( 4, 0, 5, 0, 1 );
- nlr.setWeight( 4, 0, 5, 1, 0 );
- nlr.setWeight( 4, 1, 5, 0, 1 );
- nlr.setWeight( 4, 1, 5, 1, 1 );
-
- nlr.setBias( 5, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );

- // Mark the LeakyReLU sources
+ // Mark the Abs sources
 nlr.addActivationSource( 1, 0, 2, 0 );
 nlr.addActivationSource( 1, 1, 2, 1 );
- nlr.addActivationSource( 3, 0, 4, 0 );
- nlr.addActivationSource( 3, 1, 4, 1 );
-
 // Variable indexing
 nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
 nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
@@ -1436,18 +1222,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );

 nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 );
-
- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 );
-
- nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 );

 // Very loose bounds for neurons except inputs
 double large = 1000000;

- tableau.getBoundManager().initialize( 12 );
 tableau.setLowerBound( 2, -large );
 tableau.setUpperBound( 2, large );
 tableau.setLowerBound( 3, -large );
@@ -1458,157 +1236,113 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 tableau.setUpperBound( 5, large );
 tableau.setLowerBound( 6, -large );
 tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
- }
-
- void populateNetworkSBTSigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
- {
- /*
- 1 S 1 Rd
- x0 --- x2 ---> x4 --- x6 --- x8
- \ / \ /
- 1 \ / 1 \ /
- \/ \/
- /\ /\
- 1 / \ 1 / \
- / \ S / \ Rd
- x1 --- x3 ---> x5 --- x7 --- x9
- -1 -1
-
- */
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );

- // Create the layers
- nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
- nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 );
- nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 4, NLR::Layer::ROUND, 2 );
+ // Invoke SBT
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );

- // Mark layer dependencies
- for ( unsigned i = 1; i <= 4; ++i )
- nlr.addLayerDependency( i - 1, i );
+ /*
+ Input ranges:

- // Set the weights and biases for the weighted sum layers
- nlr.setWeight( 0, 0, 1, 0, 1 );
- nlr.setWeight( 0, 0, 1, 1, 1 );
- nlr.setWeight( 0, 1, 1, 0, 1 );
- nlr.setWeight( 0, 1, 1, 1, -1 );
+ x0: [4, 6]
+ x1: [1, 5]

- nlr.setWeight( 2, 0, 3, 0, 1 );
- nlr.setWeight( 2, 0, 3, 1, 1 );
- nlr.setWeight( 2, 1, 3, 0, 1 );
- nlr.setWeight( 2, 1, 3, 1, -1 );
+ Layer 1:

- // Mark the Sigmoid sources
- nlr.addActivationSource( 1, 0, 2, 0 );
- nlr.addActivationSource( 1, 1, 2, 1 );
- nlr.addActivationSource( 3, 0, 4, 0 );
- nlr.addActivationSource( 3, 1, 4, 1 );
+ x2.lb = 2x0 + 3x1 : [11, 27]
+ x2.ub = 2x0 + 3x1 : [11, 27]

- // Variable indexing
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]

- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+ Both absolute values positive, bounds survive through the activations:

- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+ x4.lb = 2x0 + 3x1 : [11, 27]
+ x4.ub = 2x0 + 3x1 : [11, 27]

- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 );
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]

- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 );
+ Layer 2:

- // Very loose bounds for neurons except inputs
- double large = 1000000;
+ x6.lb = x0 + 2x1 : [6, 16]
+ x6.ub = x0 + 2x1 : [6, 16]
+ */

- tableau.getBoundManager().initialize( 10 );
- tableau.setLowerBound( 2, -large );
- tableau.setUpperBound( 2, large );
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
+ List<Tightening> expectedBounds( {
+ Tightening( 2, 11, Tightening::LB ),
+ Tightening( 2, 27, Tightening::UB ),
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, 11, Tightening::LB ),
+ Tightening( 4, 27, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),
+
+ Tightening( 6, 6, Tightening::LB ),
+ Tightening( 6, 16, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+
+ TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() );
+ for ( const auto &bound : bounds )
+ TS_ASSERT( expectedBounds.exists( bound ) );
 }

- void populateNetworkSBTMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ void test_sbt_abs_positive_and_negative()
 {
- /*
-
- 1 R Max 2
- x0 --- x2 ---> x4 --- x6 ---> x7
- \ / /
- 1 \ / /
- \/ /
- /\ /
- 1 / \ /
- / \ R /
- x1 --- x3 ---> x5
- -1
-
- */
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 7 );
+ nlr.setTableau( &tableau );

 // Create the layers
 nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
 nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 2, NLR::Layer::RELU, 2 );
- nlr.addLayer( 3, NLR::Layer::MAX, 1 );
- nlr.addLayer( 4, NLR::Layer::WEIGHTED_SUM, 1 );
+ nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );

 // Mark layer dependencies
- for ( unsigned i = 1; i <= 4; ++i )
+ for ( unsigned i = 1; i <= 3; ++i )
 nlr.addLayerDependency( i - 1, i );

- // Set the weights and biases for the weighted sum layers
- nlr.setWeight( 0, 0, 1, 0, 1 );
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
 nlr.setWeight( 0, 0, 1, 1, 1 );
- nlr.setWeight( 0, 1, 1, 0, 1 );
- nlr.setWeight( 0, 1, 1, 1, -1 );
- nlr.setWeight( 3, 0, 4, 0, 2 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );

 // Mark the ReLU sources
 nlr.addActivationSource( 1, 0, 2, 0 );
 nlr.addActivationSource( 1, 1, 2, 1 );

- // Mark the Max sources
- nlr.addActivationSource( 2, 0, 3, 0 );
- nlr.addActivationSource( 2, 1, 3, 0 );

 // Variable indexing
 nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
 nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
 nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
 nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
 nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
 nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
 nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 7 );
+

 // Very loose bounds for neurons except inputs
 double large = 1000000;

- tableau.getBoundManager().initialize( 8 );
 tableau.setLowerBound( 2, -large );
 tableau.setUpperBound( 2, large );
 tableau.setLowerBound( 3, -large );
@@ -1619,94 +1353,119 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 tableau.setUpperBound( 5, large );
 tableau.setLowerBound( 6, -large );
 tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- }

- void populateNetworkSBTSoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
- {
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0)
+ nlr.setBias( 1, 0, -30 );
+
+ // Invoke SBT
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layer 1:

- x0 x3 S x6
+ x2.lb = 2x0 + 3x1 - 30 : [-19, -3]
+ x2.ub = 2x0 + 3x1 - 30 : [-19, -3]

- x1 x4 S x7
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]

- x2 x5 S x8
+ First absolute value is negative, bounds get flipped
+ Second absolute value is positive, bounds survive the activation

- x3 = x0 - x1 + x2 + 1
- x4 = -x0 + x1 + x2 + 2
- x5 = -x0 - x1 - x2 + 3
+ x4.lb = -2x0 -3x1 + 30 : [3, 19]
+ x4.ub = -2x0 -3x1 + 30 : [3, 19]

- x6 x7 x8 = softmax(x3, x4, x5)
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]

- x9 = x6 + x7 + x8
- x10 = x6 + x7 + x8
+ Layer 2:

+ x6.lb = - 3x0 - 4x1 + 30 : [-8, 14]
+ x6.ub = - 3x0 - 4x1 + 30 : [-8, 14]
+ */

+ List<Tightening> expectedBounds( {
+ Tightening( 2, -19, Tightening::LB ),
+ Tightening( 2, -3, Tightening::UB ),
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, 3, Tightening::LB ),
+ Tightening( 4, 19, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),
+
+ Tightening( 6, -8, Tightening::LB ),
+ Tightening( 6, 14, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() );
+
+ for ( const auto &bound : bounds )
+ TS_ASSERT( expectedBounds.exists( bound ) );
+ }
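+
+ // ( The Layer 2 bounds above follow from x6 = x4 - x5 =
+ // ( -2x0 - 3x1 + 30 ) - ( x0 + x1 ) = -3x0 - 4x1 + 30, which over
+ // [4, 6] x [1, 5] is minimized at ( x0, x1 ) = ( 6, 5 ), giving -8, and
+ // maximized at ( 4, 1 ), giving 14. )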
+
+ void test_sbt_absolute_values_positive_and_not_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 7 );
+ nlr.setTableau( &tableau );
+
 // Create the layers
- nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
- nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
- nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 );
- nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );

 // Mark layer dependencies
 for ( unsigned i = 1; i <= 3; ++i )
 nlr.addLayerDependency( i - 1, i );

- // Set the weights and biases for the weighted sum layers
- nlr.setWeight( 0, 0, 1, 0, 1 );
- nlr.setWeight( 0, 0, 1, 1, -1 );
- nlr.setWeight( 0, 0, 1, 2, -1 );
- nlr.setWeight( 0, 1, 1, 0, -1 );
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
 nlr.setWeight( 0, 1, 1, 1, 1 );
- nlr.setWeight( 0, 1, 1, 2, -1 );
- nlr.setWeight( 0, 2, 1, 0, 1 );
- nlr.setWeight( 0, 2, 1, 1, 1 );
- nlr.setWeight( 0, 2, 1, 2, -1 );
 nlr.setWeight( 2, 0, 3, 0, 1 );
- nlr.setWeight( 2, 1, 3, 0, 1 );
- nlr.setWeight( 2, 2, 3, 0, 1 );
- nlr.setWeight( 2, 0, 3, 1, -1 );
- nlr.setWeight( 2, 1, 3, 1, -1 );
- nlr.setWeight( 2, 2, 3, 1, -1 );
-
- nlr.setBias( 1, 0, 1 );
- nlr.setBias( 1, 1, 2 );
- nlr.setBias( 1, 2, 3 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );

+ // Mark the Abs sources
 nlr.addActivationSource( 1, 0, 2, 0 );
- nlr.addActivationSource( 1, 1, 2, 0 );
- nlr.addActivationSource( 1, 2, 2, 0 );
- nlr.addActivationSource( 1, 0, 2, 1 );
 nlr.addActivationSource( 1, 1, 2, 1 );
- nlr.addActivationSource( 1, 2, 2, 1 );
- nlr.addActivationSource( 1, 0, 2, 2 );
- nlr.addActivationSource( 1, 1, 2, 2 );
- nlr.addActivationSource( 1, 2, 2, 2 );
-
 // Variable indexing
 nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
 nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );

 // Very loose bounds for neurons except inputs
 double large = 1000000;

- tableau.getBoundManager().initialize( 11 );
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
 tableau.setLowerBound( 3, -large );
 tableau.setUpperBound( 3, large );
 tableau.setLowerBound( 4, -large );
@@ -1715,197 +1474,104 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 tableau.setUpperBound( 5, large );
 tableau.setLowerBound( 6, -large );
 tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- }
-
- void populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
- {
- /*
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );

- x0 x3 S x8
+ // Strong negative bias for x2, which is node (1,0)
+ nlr.setBias( 1, 0, -15 );

- x1 x4 S x9
+ // Invoke SBT
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );

- x2 x5 S x10
+ /*
+ Input ranges:

- x6 S x11
+ x0: [4, 6]
+ x1: [1, 5]

- x7 S x12
+ Layer 1:

- x3 = x0 - x1 + x2 + 1
- x4 = -x0 + x1 + x2 + 2
- x5 = -x0 - x1 - x2 + 3
- x6 = -x0 - x1 - x2 + 2
- x7 = -x0 - x1 - x2 + 1
+ x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
+ x2.ub = 2x0 + 3x1 - 15 : [-4, 12]

- x8 x10 x12 = softmax(x3, x5, x7)
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]

- x9 x11 = softmax(x4, x6)
+ First absolute value is undecided, bounds are concretized.
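+ For an input b in [lb, ub] with lb < 0 < ub, |b| is relaxed to the concrete
+ interval [0, max( -lb, ub )]; here x2 in [-4, 12] yields [0, 12].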
+ Second absolute value is positive, bounds survive the activation

- x13 = x8 + x10 + x12
- x14 = -x8 - x10 - x12
- x15 = x9 + x11
- x16 = -x9 - x11
+ x4 range: [0, 12]
+ x4.lb = 0
+ x4.ub = 12

- */
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]

- // Create the layers
- nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
- nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 );
- nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 );
- nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 );
+ Layer 2:

- // Mark layer dependencies
- for ( unsigned i = 1; i <= 3; ++i )
- nlr.addLayerDependency( i - 1, i );
+ x6.lb = - x0 - x1 : [-11, -5]
+ x6.ub = - x0 - x1 + 12 : [ 1, 7]

- // Set the weights and biases for the weighted sum layers
- nlr.setWeight( 0, 0, 1, 0, 1 );
- nlr.setWeight( 0, 0, 1, 1, -1 );
- nlr.setWeight( 0, 0, 1, 2, -1 );
- nlr.setWeight( 0, 0, 1, 3, -1 );
- nlr.setWeight( 0, 0, 1, 4, -1 );
- nlr.setWeight( 0, 1, 1, 0, -1 );
- nlr.setWeight( 0, 1, 1, 1, 1 );
- nlr.setWeight( 0, 1, 1, 2, -1 );
- nlr.setWeight( 0, 1, 1, 3, -1 );
- nlr.setWeight( 0, 1, 1, 4, -1 );
- nlr.setWeight( 0, 2, 1, 0, 1 );
- nlr.setWeight( 0, 2, 1, 1, 1 );
- nlr.setWeight( 0, 2, 1, 2, -1 );
- nlr.setWeight( 0, 2, 1, 3, -1 );
- nlr.setWeight( 0, 2, 1, 4, -1 );
- nlr.setWeight( 2, 0, 3, 0, 1 );
- nlr.setWeight( 2, 2, 3, 0, 1 );
- nlr.setWeight( 2, 4, 3, 0, 1 );
- nlr.setWeight( 2, 0, 3, 1, -1 );
- nlr.setWeight( 2, 2, 3, 1, -1 );
- nlr.setWeight( 2, 4, 3, 1, -1 );
- nlr.setWeight( 2, 1, 3, 2, 1 );
- nlr.setWeight( 2, 3, 3, 2, 1 );
- nlr.setWeight( 2, 1, 3, 3, -1 );
- nlr.setWeight( 2, 3, 3, 3, -1 );
+ x6 range: [-11, 7]
+ */

- nlr.setBias( 1, 0, 1 );
- nlr.setBias( 1, 1, 2 );
- nlr.setBias( 1, 2, 3 );
- nlr.setBias( 1, 3, 2 );
- nlr.setBias( 1, 4, 1 );
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -4, Tightening::LB ),
+ Tightening( 2, 12, Tightening::UB ),
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),

- nlr.addActivationSource( 1, 0, 2, 0 );
- nlr.addActivationSource( 1, 2, 2, 0 );
- nlr.addActivationSource( 1, 4, 2, 0 );
- nlr.addActivationSource( 1, 0, 2, 2 );
- nlr.addActivationSource( 1, 2, 2, 2 );
- nlr.addActivationSource( 1, 4, 2, 2 );
- nlr.addActivationSource( 1, 0, 2, 4 );
- nlr.addActivationSource( 1, 2, 2, 4 );
- nlr.addActivationSource( 1, 4, 2, 4 );
- nlr.addActivationSource( 1, 1, 2, 1 );
- nlr.addActivationSource( 1, 3, 2, 1 );
- nlr.addActivationSource( 1, 1, 2, 3 );
- nlr.addActivationSource( 1, 3, 2, 3 );
-
- // Variable indexing
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 );
-
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 );
-
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 );
+ Tightening( 4, 0, Tightening::LB ),
+ Tightening( 4, 12, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),

- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 );
+ Tightening( 6, -11, Tightening::LB ),
+ Tightening( 6, 7, Tightening::UB ),
+ } );

- // Very loose bounds for neurons except inputs
- double large = 1000000;
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );

- tableau.getBoundManager().initialize( 17 );
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
- tableau.setLowerBound( 12, -large );
- tableau.setUpperBound( 12, large );
- tableau.setLowerBound( 13, -large );
- tableau.setUpperBound( 13, large );
- tableau.setLowerBound( 14, -large );
- tableau.setUpperBound( 14, large );
- tableau.setLowerBound( 15, -large );
- tableau.setUpperBound( 15, large );
- tableau.setLowerBound( 16, -large );
- tableau.setUpperBound( 16, large );
+ TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() );
+ for ( const auto &bound : bounds )
+ TS_ASSERT( expectedBounds.exists( bound ) );
 }

- void populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ void test_sbt_absolute_values_active_and_externally_fixed()
 {
- /*
-
-
- x0 x2
- x x4 -- x5
- x1 x3
-
- x2 = x0 - 2 * x1
- x3 = x0 + x1
- x4 = -x5
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );

- x4 = x2 * x3
- */
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 7 );
+ nlr.setTableau( &tableau );

 // Create the layers
 nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
 nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 );
+ nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
 nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );

 // Mark layer dependencies
 for ( unsigned i = 1; i <= 3; ++i )
 nlr.addLayerDependency( i - 1, i );

- // Set the weights and biases for the weighted sum layers
- nlr.setWeight( 0, 0, 1, 0, 1 );
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
 nlr.setWeight( 0, 0, 1, 1, 1 );
- nlr.setWeight( 0, 1, 1, 0, -2 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
 nlr.setWeight( 0, 1, 1, 1, 1 );
- nlr.setWeight( 2, 0, 3, 0, -1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );

+ // Mark the Abs sources
 nlr.addActivationSource( 1, 0, 2, 0 );
- nlr.addActivationSource( 1, 1, 2, 0 );
-
+ nlr.addActivationSource( 1, 1, 2, 1 );

 // Variable indexing
 nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
@@ -1915,13 +1581,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );

 nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );

- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );

 // Very loose bounds for neurons except inputs
 double large = 1000000;

- tableau.getBoundManager().initialize( 6 );
 tableau.setLowerBound( 2, -large );
 tableau.setUpperBound( 2, large );
 tableau.setLowerBound( 3, -large );
@@ -1930,10276 +1596,616 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 tableau.setUpperBound( 4, large );
 tableau.setLowerBound( 5, -large );
 tableau.setUpperBound( 5, large );
- }
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );

- void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
- {
- /*
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );

- 1 R -1 R -1 2
- x0 --- x2 ---> x4 --- x6 ---> x8 --- x10
- \ / \ / \ /
- 1 \ / 2 \ / 1 \ /
- \/ \/ \/
- /\ /\ /\
- -1 / \ 1 / \ -1 / \
- / \ R / \ R / \
- x1 --- x3 ---> x5 --- x7 ---> x9 --- x11
- 1 -1 2
-
- The example described in Fig. 2 of
- https://dl.acm.org/doi/10.1145/3563325
- using ReLU activation instead of LeakyReLU
- */
+ // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+ nlr.setBias( 1, 0, -15 );

- // Create the layers
- nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
- nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 2, NLR::Layer::RELU, 2 );
- nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 4, NLR::Layer::RELU, 2 );
- nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
+ // However, the weighted sum variable has been eliminated
+ nlr.eliminateVariable( 2, -3 );

- nlr.getLayer( 2 )->setAlpha( 0.1 );
- nlr.getLayer( 4 )->setAlpha( 0.1 );
+ // Invoke SBT
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );

- // Mark layer dependencies
- for ( unsigned i = 1; i <= 5; ++i )
- nlr.addLayerDependency( i - 1, i );
+ /*
+ Input ranges:

- // Set the weights and biases for the weighted sum layers
- nlr.setWeight( 0, 0, 1, 0, 1 );
- nlr.setWeight( 0, 0, 1, 1, 1 );
- nlr.setWeight( 0, 1, 1, 0, -1 );
- nlr.setWeight( 0, 1, 1, 1, 1 );
+ x0: [4, 6]
+ x1: [1, 5]

- nlr.setWeight( 2, 0, 3, 0, -1 );
- nlr.setWeight( 2, 0, 3, 1, 2 );
- nlr.setWeight( 2, 1, 3, 0, 1 );
- nlr.setWeight( 2, 1, 3, 1, -1 );
+ Layer 1:

- nlr.setWeight( 4, 0, 5, 0, -1 );
- nlr.setWeight( 4, 0, 5, 1, 1 );
- nlr.setWeight( 4, 1, 5, 0, -1 );
- nlr.setWeight( 4, 1, 5, 1, 2 );
+ x2 is eliminated, everything set to -3

- nlr.setBias( 5, 1, 2 );
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]

- // Mark the ReLU sources
- nlr.addActivationSource( 1, 0, 2, 0 );
- nlr.addActivationSource( 1, 1, 2, 1 );
+ Second absolute value is positive, bounds survive the activation

- nlr.addActivationSource( 3, 0, 4, 0 );
- nlr.addActivationSource( 3, 1, 4, 1 );
+ x4: all set to 3

- // Variable indexing
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]

- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+ Layer 2:

- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+ x6.lb = - x0 - x1 + 3 : [-8, -2]
+ x6.ub = - x0 - x1 + 3 : [-8, -2]
+ */
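+ // To see where [-8, -2] comes from: eliminating x2 as -3 fixes the first
+ // absolute value at x4 = |-3| = 3, so x6 = x4 - x5 = 3 - ( x0 + x1 ), and over
+ // x0 + x1 in [5, 11] this evaluates to [-8, -2].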
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 );
+ List<Tightening> expectedBounds( {
+ // x2 does not appear, because it has been eliminated

- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 );
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),

- nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 );
+ Tightening( 4, 3, Tightening::LB ),
+ Tightening( 4, 3, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),

- // Very loose bounds for neurons except inputs
- double large = 1000000;
+ Tightening( 6, -8, Tightening::LB ),
+ Tightening( 6, -2, Tightening::UB ),
+ } );

- tableau.getBoundManager().initialize( 12 );
- tableau.setLowerBound( 2, -large );
- tableau.setUpperBound( 2, large );
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+
+ printf( "Dumping discovered bounds:\n" );
+ for ( const auto &bound : bounds )
+ bound.dump();
+
+ TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() );
+ for ( const auto &bound : bounds )
+ TS_ASSERT( expectedBounds.exists( bound ) );
 }

- void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ void test_generate_input_query()
 {
- /*
- x3 x7
- x0 x11 x14
- x4 x8 x17 x19
- x1 x12 x15 x21
- x5 x9 x18 x20
- x2 x13 x16
- x6 x10
-
- x3 = -x0 + x1
- x4 = x0 + 2*x1
- x5 = x0 + x1 + x2
- x6 = 3*x0 - 2*x1 - x2
-
- x7 = ReLU( x3 )
- x8 = ReLU( x4 )
- x9 = ReLU( x5 )
- x10 = ReLU( x6 )
-
- x11 = 2x7 + x9 - x10 + 2
- x12 = -x7 + 2x8 + x10
- x13 = x7 - x8 + 2x9 - 2
-
- x14 = ReLU( x11 )
- x15 = ReLU( x12 )
- x16 = ReLU( x13 )
-
- x17 = -x14 + x15 - x16
- x18 = x14 + x15 + x16
-
- x19 = ReLU( x17 )
- x20 = ReLU( x18 )
-
- x21 = x19 - x20 - 1
- */
+ NLR::NetworkLevelReasoner nlr;

 // Create the layers
- nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
- nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 );
- nlr.addLayer( 2, NLR::Layer::RELU, 4 );
- nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 );
- nlr.addLayer( 4, NLR::Layer::RELU, 3 );
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
+ nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 4, NLR::Layer::RELU, 2 );
 nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 6, NLR::Layer::RELU, 2 );
- nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 );

 // Mark layer dependencies
- for ( unsigned i = 1; i <= 7; ++i )
+ for ( unsigned i = 1; i <= 5; ++i )
 nlr.addLayerDependency( i - 1, i );

+ // Variable indexing
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 );
+
 // Set the weights and biases for the weighted sum layers
- nlr.setWeight( 0, 0, 1, 0, -1 );
- nlr.setWeight( 0, 0, 1, 1, 1 );
- nlr.setWeight( 0, 0, 1, 2, 1 );
- nlr.setWeight( 0, 0, 1, 3, 3 );
- nlr.setWeight( 0, 1, 1, 0, 1 );
- nlr.setWeight( 0, 1, 1, 1, 2 );
+
+ nlr.setWeight( 0, 0, 1, 0, 1 );
+ nlr.setWeight( 0, 0, 1, 1, 2 );
+ nlr.setWeight( 0, 1, 1, 1, -3 );
 nlr.setWeight( 0, 1, 1, 2, 1 );
- nlr.setWeight( 0, 1, 1, 3, -2 );
- nlr.setWeight( 0, 2, 1, 2, 1 );
- nlr.setWeight( 0, 2, 1, 3, -1 );

- nlr.setWeight( 2, 0, 3, 0, 2 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
 nlr.setWeight( 2, 0, 3, 1, -1 );
- nlr.setWeight( 2, 0, 3, 2, 1 );
- nlr.setWeight( 2, 1, 3, 1, 2 );
- nlr.setWeight( 2, 1, 3, 2, -1 );
- nlr.setWeight( 2, 2, 3, 0, 1 );
- nlr.setWeight( 2, 2, 3, 2, 2 );
- nlr.setWeight( 2, 3, 3, 0, -1 );
- nlr.setWeight( 2, 3, 3, 1, 1 );
-
- nlr.setWeight( 4, 0, 5, 0, -1 );
+ nlr.setWeight( 2, 1, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 1, 1 );
+ nlr.setWeight( 2, 2, 3, 0, -1 );
+ nlr.setWeight( 2, 2, 3, 1, -5 );
+
+ nlr.setWeight( 4, 0, 5, 0, 1 );
 nlr.setWeight( 4, 0, 5, 1, 1 );
- nlr.setWeight( 4, 1, 5, 0, 1 );
- nlr.setWeight( 4, 1, 5, 1, 1 );
- nlr.setWeight( 4, 2, 5, 0, -1 );
- nlr.setWeight( 4, 2, 5, 1, 1 );
+ nlr.setWeight( 4, 1, 5, 1, 3 );
+
+ nlr.setBias( 1, 0, 1 );
+ nlr.setBias( 1, 1, 0 );
+ nlr.setBias( 1, 2, 0 );

- nlr.setWeight( 6, 0, 7, 0, 1 );
- nlr.setWeight( 6, 1, 7, 0, -1 );
+ nlr.setBias( 3, 0, 0 );
+ nlr.setBias( 3, 1, 2 );

- nlr.setBias( 3, 0, 2 );
- nlr.setBias( 3, 2, -2 );
- nlr.setBias( 7, 0, -1 );
+ nlr.setBias( 5, 0, 0 );
+ nlr.setBias( 5, 1, 0 );

- // Mark the ReLU sources
+ // Mark the ReLU/Abs sources
 nlr.addActivationSource( 1, 0, 2, 0 );
 nlr.addActivationSource( 1, 1, 2, 1 );
 nlr.addActivationSource( 1, 2, 2, 2 );
- nlr.addActivationSource( 1, 3, 2, 3 );

 nlr.addActivationSource( 3, 0, 4, 0 );
 nlr.addActivationSource( 3, 1, 4, 1 );
- nlr.addActivationSource( 3, 2, 4, 2 );
- nlr.addActivationSource( 5, 0, 6, 0 );
- nlr.addActivationSource( 5, 1, 6, 1 );

+ // Start the testing
+ Query ipq;
+ nlr.generateQuery( ipq );
+ List<Equation> unhandledEquations;
+ Set<unsigned> varsInUnhandledConstraints;
+ TS_ASSERT(
+ ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) );
+ NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner();

- // Variable indexing
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 );
+ double input[2];
+ double output[2];

- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 );
+ input[0] = 1;
+ input[1] = 1;

- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 );
+ TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) );

- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 );
+ TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) );
+ TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) );
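+
+ // Hand-checking the first point: with ( x0, x1 ) = ( 1, 1 ) the weighted sums
+ // are x2 = x0 + 1 = 2, x3 = 2x0 - 3x1 = -1 and x4 = x1 = 1; the absolute
+ // values give ( 2, 1, 1 ), so x8 = 2 + 1 - 1 = 2 and x9 = -2 + 1 - 5 + 2 = -4,
+ // whose ReLUs are ( 2, 0 ), and the outputs are x12 = x10 = 2 and
+ // x13 = x10 + 3 * x11 = 2.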
- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 );
+ input[0] = 1;
+ input[1] = 2;

- nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 );
+ TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) );

- nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 );
+ TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) );
+ TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );
+ }

- nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 );
+ void test_simulate_relus()
+ {
+ NLR::NetworkLevelReasoner nlr;

- // Very loose bounds for neurons except inputs
- double large = 1000000;
+ populateNetwork( nlr );

- tableau.getBoundManager().initialize( 22 );
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
- tableau.setLowerBound( 12, -large );
- tableau.setUpperBound( 12, large );
- tableau.setLowerBound( 13, -large );
- tableau.setUpperBound( 13, large );
- tableau.setLowerBound( 14, -large );
- tableau.setUpperBound( 14, large );
- tableau.setLowerBound( 15, -large );
- tableau.setUpperBound( 15, large );
- tableau.setLowerBound( 16, -large );
- tableau.setUpperBound( 16, large );
- tableau.setLowerBound( 17, -large );
- tableau.setUpperBound( 17, large );
- tableau.setLowerBound( 18, -large );
- tableau.setUpperBound( 18, large );
- tableau.setLowerBound( 19, -large );
- tableau.setUpperBound( 19, large );
- tableau.setLowerBound( 20, -large );
- tableau.setUpperBound( 20, large );
- tableau.setLowerBound( 21, -large );
- tableau.setUpperBound( 21, large );
- }
+ unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );

- void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
- {
- /*
+ // With ReLUs, inputs are zeros, only biases count
+ Vector<Vector<double>> simulations1;
+ simulations1.append( Vector<double>( simulationSize, 0 ) );
+ simulations1.append( Vector<double>( simulationSize, 0 ) );

- 1 S -1 S -1 2
- x0 --- x2 ---> x4 --- x6 ---> x8 --- x10
- \ / \ / \ /
- 1 \ / 2 \ / 1 \ /
- \/ \/ \/
- /\ /\ /\
- -1 / \ 1 / \ -1 / \
- / \ S / \ S / \
- x1 --- x3 ---> x5 --- x7 ---> x9 --- x11
- 1 -1 2
-
- The example described in Fig. 2 of
- https://dl.acm.org/doi/10.1145/3563325
- using Sigmoid activation instead of LeakyReLU
- */
+ TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );

- // Create the layers
- nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
- nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 );
- nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 );
- nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
+ for ( unsigned i = 0; i < simulationSize; ++i )
+ {
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 0 )
+ .get( i ),
+ 1 ) );
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 1 )
+ .get( i ),
+ 4 ) );
+ }

- // Mark layer dependencies
- for ( unsigned i = 1; i <= 5; ++i )
- nlr.addLayerDependency( i - 1, i );
+ // With ReLUs, case 1
+ Vector<Vector<double>> simulations2;
+ simulations2.append( Vector<double>( simulationSize, 1 ) );
+ simulations2.append( Vector<double>( simulationSize, 1 ) );

- // Set the weights and biases for the weighted sum layers
- nlr.setWeight( 0, 0, 1, 0, 1 );
- nlr.setWeight( 0, 0, 1, 1, 1 );
- nlr.setWeight( 0, 1, 1, 0, -1 );
- nlr.setWeight( 0, 1, 1, 1, 1 );
+ TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );

- nlr.setWeight( 2, 0, 3, 0, -1 );
- nlr.setWeight( 2, 0, 3, 1, 2 );
- nlr.setWeight( 2, 1, 3, 0, 1 );
- nlr.setWeight( 2, 1, 3, 1, -1 );
+ for ( unsigned i = 0; i < simulationSize; ++i )
+ {
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 0 )
+ .get( i ),
+ 1 ) );
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 1 )
+ .get( i ),
+ 1 ) );
+ }

- nlr.setWeight( 4, 0, 5, 0, -1 );
- nlr.setWeight( 4, 0, 5, 1, 1 );
- nlr.setWeight( 4, 1, 5, 0, -1 );
- nlr.setWeight( 4, 1, 5, 1, 2 );
+ // With ReLUs, case 1 and 2
+ Vector<Vector<double>> simulations3;
+ simulations3.append( Vector<double>( simulationSize, 1 ) );
+ simulations3.append( Vector<double>( simulationSize, 2 ) );

- nlr.setBias( 5, 1, 2 );
+ TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) );

- // Mark the Sigmoid sources
- nlr.addActivationSource( 1, 0, 2, 0 );
- nlr.addActivationSource( 1, 1, 2, 1 );
+ for ( unsigned i = 0; i < simulationSize; ++i )
+ {
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 0 )
+ .get( i ),
+ 0 ) );
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 1 )
+ .get( i ),
+ 0 ) );
+ }
+ }

- nlr.addActivationSource( 3, 0, 4, 0 );
- nlr.addActivationSource( 3, 1, 4, 1 );
+ void test_simulate_sigmoids()
+ {
+ NLR::NetworkLevelReasoner nlr;

- // Variable indexing
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+ populateNetworkWithSigmoids( nlr );

- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+ unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );

- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+ // case 1
+ Vector<Vector<double>> simulations1;
+ simulations1.append( Vector<double>( simulationSize, 0 ) );
+ simulations1.append( Vector<double>( simulationSize, 0 ) );

- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 );
+ TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
+
+ for ( unsigned i = 0; i < simulationSize; ++i )
+ {
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 0 )
+ .get( i ),
+ 0.6750,
+ 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 1 )
+ .get( i ),
+ 3.0167,
+ 0.0001 ) );
+ }

- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 );
+ // case 2
+ Vector<Vector<double>> simulations2;
+ simulations2.append( Vector<double>( simulationSize, 1 ) );
+ simulations2.append( Vector<double>( simulationSize, 1 ) );

- nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 );
+ TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );

- // Very loose bounds for neurons except inputs
- double large = 1000000;
+ for ( unsigned i = 0; i < simulationSize; ++i )
+ {
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 0 )
+ .get( i ),
+ 0.6032,
+ 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 1 )
+ .get( i ),
+ 2.5790,
+ 0.0001 ) );
+ }

- tableau.getBoundManager().initialize( 12 );
- tableau.setLowerBound( 2, -large );
- tableau.setUpperBound( 2, large );
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
+ // case 3
+ Vector<Vector<double>> simulations3;
+ simulations3.append( Vector<double>( simulationSize, 1 ) );
+ simulations3.append( Vector<double>( simulationSize, 2 ) );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) );
+
+ for ( unsigned i = 0; i < simulationSize; ++i )
+ {
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 0 )
+ .get( i ),
+ 0.5045,
+ 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 1 )
+ .get( i ),
+ 2.1957,
+ 0.0001 ) );
+ }
+ }

- void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ void test_simulate_non_consecutive_layers()
 {
- /*
- x3 x7
- x0 x11 x14
- x4 x8 x17 x19
- x1 x12 x15 x21
- x5 x9 x18 x20
- x2 x13 x16
- x6 x10
-
- x3 = -x0 + x1
- x4 = x0 + 2*x1
- x5 = x0 + x1 + x2
- x6 = 3*x0 - 2*x1 - x2
-
- x7 = Sigmoid( x3 )
- x8 = Sigmoid( x4 )
- x9 = Sigmoid( x5 )
- x10 = Sigmoid( x6 )
-
- x11 = 2x7 + x9 - x10 + 2
- x12 = -x7 + 2x8 + x10
- x13 = x7 - x8 + 2x9 - 2
-
- x14 = Sigmoid( x11 )
- x15 = Sigmoid( x12 )
- x16 = Sigmoid( x13 )
-
- x17 = -x14 + x15 - x16
- x18 = x14 + x15 + x16
-
- x19 = Sigmoid( x17 )
- x20 = Sigmoid( x18 )
-
- x21 = x19 - x20 - 1
- */
+ NLR::NetworkLevelReasoner nlr;

 // Create the layers
- nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
- nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 );
- nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 );
- nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 );
- nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 );
- nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 );
- nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 );
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
+ nlr.addLayer( 2, NLR::Layer::RELU, 3 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 4, NLR::Layer::RELU, 3 );
+ nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 );

 // Mark layer dependencies
- for ( unsigned i = 1; i <= 7; ++i )
- nlr.addLayerDependency( i - 1, i );
+ nlr.addLayerDependency( 0, 1 );
+ nlr.addLayerDependency( 1, 2 );
+ nlr.addLayerDependency( 2, 3 );
+ nlr.addLayerDependency( 0, 3 );
+ nlr.addLayerDependency( 3, 4 );
+ nlr.addLayerDependency( 0, 4 );
+ nlr.addLayerDependency( 4, 5 );

- // Set the weights and biases for the weighted sum layers
- nlr.setWeight( 0, 0, 1, 0, -1 );
- nlr.setWeight( 0, 0, 1, 1, 1 );
- nlr.setWeight( 0, 0, 1, 2, 1 );
- nlr.setWeight( 0, 0, 1, 3, 3 );
- nlr.setWeight( 0, 1, 1, 0, 1 );
- nlr.setWeight( 0, 1, 1, 1, 2 );
+ // Set the weights and relus
+ nlr.setWeight( 0, 0, 1, 0, 1 );
+ nlr.setWeight( 0, 0, 1, 1, 2 );
+ nlr.setWeight( 0, 1, 1, 1, -3 );
 nlr.setWeight( 0, 1, 1, 2, 1 );
- nlr.setWeight( 0, 1, 1, 3, -2 );
- nlr.setWeight( 0, 2, 1, 2, 1 );
- nlr.setWeight( 0, 2, 1, 3, -1 );
-
- nlr.setWeight( 2, 0, 3, 0, 2 );
- nlr.setWeight( 2, 0, 3, 1, -1 );
- nlr.setWeight( 2, 0, 3, 2, 1 );
- nlr.setWeight( 2, 1, 3, 1, 2 );
- nlr.setWeight( 2, 1, 3, 2, -1 );
- nlr.setWeight( 2, 2, 3, 0, 1 );
- nlr.setWeight( 2, 2, 3, 2, 2 );
- nlr.setWeight( 2, 3, 3, 0, -1 );
- nlr.setWeight( 2, 3, 3, 1, 1 );
-
- nlr.setWeight( 4, 0, 5, 0, -1 );
- nlr.setWeight( 4, 0, 5, 1, 1 );
- nlr.setWeight( 4, 1, 5, 0, 1 );
- nlr.setWeight( 4, 1, 5, 1, 1 );
- nlr.setWeight( 4, 2, 5, 0, -1 );
- nlr.setWeight( 4, 2, 5, 1, 1 );
-
- nlr.setWeight( 6, 0, 7, 0, 1 );
- nlr.setWeight( 6, 1, 7, 0, -1 );
- nlr.setBias( 3, 0, 2 );
- nlr.setBias( 3, 2, -2 );
- nlr.setBias( 7, 0, -1 );
-
- // Mark the Sigmoid sources
 nlr.addActivationSource( 1, 0, 2, 0 );
 nlr.addActivationSource( 1, 1, 2, 1 );
 nlr.addActivationSource( 1, 2, 2, 2 );
- nlr.addActivationSource( 1, 3, 2, 3 );
+
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, 2 );
+ nlr.setWeight( 2, 2, 3, 1, -2 );
+ nlr.setWeight( 0, 1, 3, 1, 1 );

 nlr.addActivationSource( 3, 0, 4, 0 );
 nlr.addActivationSource( 3, 1, 4, 1 );
- nlr.addActivationSource( 3, 2, 4, 2 );
+ nlr.addActivationSource( 0, 0, 4, 2 );

- nlr.addActivationSource( 5, 0, 6, 0 );
- nlr.addActivationSource( 5, 1, 6, 1 );
+ nlr.setWeight( 4, 0, 5, 0, 1 );
+ nlr.setWeight( 4, 1, 5, 0, 1 );
+ nlr.setWeight( 4, 2, 5, 0, 1 );

- // Variable indexing
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 );
+ unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );

- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 );
+ // Simulate1
+ Vector<Vector<double>> simulations1;
+ simulations1.append( Vector<double>( simulationSize, 1 ) );
+ simulations1.append( Vector<double>( simulationSize, 1 ) );

- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 );
+ TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );

- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 );
+ for ( unsigned i = 0; i < simulationSize; ++i )
+ TS_ASSERT( FloatUtils::areEqual(
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+ .get( 0 )
+ .get( i ),
+ 2 ) );

- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 );
+ // Simulate2
+ Vector<Vector<double>> simulations2;
+ simulations2.append( Vector<double>( simulationSize, -1 ) );
+ simulations2.append( Vector<double>( simulationSize, 2 ) );

- nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 );
-
- nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 );
- nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 );
-
- nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 );
-
- // Very loose bounds for neurons except inputs
- double large = 1000000;
-
- tableau.getBoundManager().initialize( 22 );
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
- tableau.setLowerBound( 12, -large );
- tableau.setUpperBound( 12, large );
- tableau.setLowerBound( 13, -large );
- tableau.setUpperBound( 13, large );
- tableau.setLowerBound( 14, -large );
- tableau.setUpperBound( 14, large );
- tableau.setLowerBound( 15, -large );
- tableau.setUpperBound( 15, large );
- tableau.setLowerBound( 16, -large );
- tableau.setUpperBound( 16, large );
- tableau.setLowerBound( 17, -large );
- tableau.setUpperBound( 17, large );
- tableau.setLowerBound( 18, -large );
- tableau.setUpperBound( 18, large );
- tableau.setLowerBound( 19, -large );
- tableau.setUpperBound( 19, large );
- tableau.setLowerBound( 20, -large );
- tableau.setUpperBound( 20, large );
- tableau.setLowerBound( 21, -large );
- tableau.setUpperBound( 21, large );
- }
-
- void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
- {
- /*
-
- 1 Sign -1 Sign -1 2
- x0 --- x2 ---> x4 --- x6 ---> x8 --- x10
- \ / \ / \ /
- 1 \ / 2 \ / 1 \ /
- \/ \/ \/
- /\ /\ /\
- -1 / \ 1 / \ -1 / \
- / \ Sign / \ Sign / \
- x1 --- x3 ---> x5 --- x7 ---> x9 --- x11
- 1 -1 2
-
- The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sign activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sign( x3 ) - x8 = Sign( x4 ) - x9 = SIgn( x5 ) - x10 = Sign( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sign( x11 ) - x15 = Sign( x12 ) - x16 = Sign( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sign( x17 ) - x20 = Sign( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, 
NLR::Layer::SIGN, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - 
tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 Rnd -1 Rnd -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Rnd / \ Rnd / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Round activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - 
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-    }
-
-    void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
-    {
-        /*
-                x3    x7
-          x0                x11    x14
-                x4    x8                  x17    x19
-          x1                x12    x15                  x21
-                x5    x9                  x18    x20
-          x2                x13    x16
-                x6    x10
-
-          x3 = -x0 + x1
-          x4 = x0 + 2*x1
-          x5 = x0 + x1 + x2
-          x6 = 3*x0 - 2*x1 - x2
-
-          x7 = Round( x3 )
-          x8 = Round( x4 )
-          x9 = Round( x5 )
-          x10 = Round( x6 )
-
-          x11 = 2x7 + x9 - x10 + 2
-          x12 = -x7 + 2x8 + x10
-          x13 = x7 - x8 + 2x9 - 2
-
-          x14 = Round( x11 )
-          x15 = Round( x12 )
-          x16 = Round( x13 )
-
-          x17 = -x14 + x15 - x16
-          x18 = x14 + x15 + x16
-
-          x19 = Round( x17 )
-          x20 = Round( x18 )
-
-          x21 = x19 - x20 - 1
-        */
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 );
-        nlr.addLayer( 2, NLR::Layer::ROUND, 4 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 );
-        nlr.addLayer( 4, NLR::Layer::ROUND, 3 );
-        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 6, NLR::Layer::ROUND, 2 );
-        nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 7; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Set the weights and biases for the weighted sum layers
-        nlr.setWeight( 0, 0, 1, 0, -1 );
-        nlr.setWeight( 0, 0, 1, 1, 1 );
-        nlr.setWeight( 0, 0, 1, 2, 1 );
-        nlr.setWeight( 0, 0, 1, 3, 3 );
-        nlr.setWeight( 0, 1, 1, 0, 1 );
-        nlr.setWeight( 0, 1, 1, 1, 2 );
-        nlr.setWeight( 0, 1, 1, 2, 1 );
-        nlr.setWeight( 0, 1, 1, 3, -2 );
-        nlr.setWeight( 0, 2, 1, 2, 1 );
-        nlr.setWeight( 0, 2, 1, 3, -1 );
-
-        nlr.setWeight( 2, 0, 3, 0, 2 );
-        nlr.setWeight( 2, 0, 3, 1, -1 );
-        nlr.setWeight( 2, 0, 3, 2, 1 );
-        nlr.setWeight( 2, 1, 3, 1, 2 );
-        nlr.setWeight( 2, 1, 3, 2, -1 );
-        nlr.setWeight( 2, 2, 3, 0, 1 );
-        nlr.setWeight( 2, 2, 3, 2, 2 );
-        nlr.setWeight( 2, 3, 3, 0, -1 );
-        nlr.setWeight( 2, 3, 3, 1, 1 );
-
-        nlr.setWeight( 4, 0, 5, 0, -1 );
-        nlr.setWeight( 4, 0, 5, 1, 1 );
-        nlr.setWeight( 4, 1, 5, 0, 1 );
-        nlr.setWeight( 4, 1, 5, 1, 1 );
-        nlr.setWeight( 4, 2, 5, 0, -1 );
-        nlr.setWeight( 4, 2, 5, 1, 1 );
-
-        nlr.setWeight( 6, 0, 7, 0, 1 );
-        nlr.setWeight( 6, 1, 7, 0, -1 );
-
-        nlr.setBias( 3, 0, 2 );
-        nlr.setBias( 3, 2, -2 );
-        nlr.setBias( 7, 0, -1 );
-
-        // Mark the Round sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-        nlr.addActivationSource( 1, 2, 2, 2 );
-        nlr.addActivationSource( 1, 3, 2, 3 );
-
-        nlr.addActivationSource( 3, 0, 4, 0 );
-        nlr.addActivationSource( 3, 1, 4, 1 );
-        nlr.addActivationSource( 3, 2, 4, 2 );
-
-        nlr.addActivationSource( 5, 0, 6, 0 );
-        nlr.addActivationSource( 5, 1, 6, 1 );
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 );
-
-        // Very loose bounds for neurons except inputs
-        double large = 1000000;
-
-        tableau.getBoundManager().initialize( 22 );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-    }
-
-    void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
-    {
-        /*
-
-                1      A       -1      A       -1      2
-          x0 --- x2 ---> x4 --- x6 ---> x8 --- x10
-            \   /          \   /          \   /
-           1 \ /          2 \ /          1 \ /
-              \/             \/             \/
-              /\             /\             /\
-          -1 / \           1 / \          -1 / \
-            /   \   A       /   \   A       /   \
-          x1 --- x3 ---> x5 --- x7 ---> x9 --- x11
-                1             -1              2
-
-          The example described in Fig. 2 of
-          https://dl.acm.org/doi/10.1145/3563325
-          using Absolute value activation instead of LeakyReLU
-        */
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 );
-        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 5; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Set the weights and biases for the weighted sum layers
-        nlr.setWeight( 0, 0, 1, 0, 1 );
-        nlr.setWeight( 0, 0, 1, 1, 1 );
-        nlr.setWeight( 0, 1, 1, 0, -1 );
-        nlr.setWeight( 0, 1, 1, 1, 1 );
-
-        nlr.setWeight( 2, 0, 3, 0, -1 );
-        nlr.setWeight( 2, 0, 3, 1, 2 );
-        nlr.setWeight( 2, 1, 3, 0, 1 );
-        nlr.setWeight( 2, 1, 3, 1, -1 );
-
-        nlr.setWeight( 4, 0, 5, 0, -1 );
-        nlr.setWeight( 4, 0, 5, 1, 1 );
-        nlr.setWeight( 4, 1, 5, 0, -1 );
-        nlr.setWeight( 4, 1, 5, 1, 2 );
-
-        nlr.setBias( 5, 1, 2 );
-
-        // Mark the Abs sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-
-        nlr.addActivationSource( 3, 0, 4, 0 );
-        nlr.addActivationSource( 3, 1, 4, 1 );
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 );
-
-        // Very loose bounds for neurons except inputs
-        double large = 1000000;
-
-        tableau.getBoundManager().initialize( 12 );
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-    }
-
-    void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
-    {
-        /*
-                x3    x7
-          x0                x11    x14
-                x4    x8                  x17    x19
-          x1                x12    x15                  x21
-                x5    x9                  x18    x20
-          x2                x13    x16
-                x6    x10
-
-          x3 = -x0 + x1
-          x4 = x0 + 2*x1
-          x5 = x0 + x1 + x2
-          x6 = 3*x0 - 2*x1 - x2
-
-          x7 = Abs( x3 )
-          x8 = Abs( x4 )
-          x9 = Abs( x5 )
-          x10 = Abs( x6 )
-
-          x11 = 2x7 + x9 - x10 + 2
-          x12 = -x7 + 2x8 + x10
-          x13 = x7 - x8 + 2x9 - 2
-
-          x14 = Abs( x11 )
-          x15 = Abs( x12 )
-          x16 = Abs( x13 )
-
-          x17 = -x14 + x15 - x16
-          x18 = x14 + x15 + x16
-
-          x19 = Abs( x17 )
-          x20 = Abs( x18 )
-
-          x21 = x19 - x20 - 1
-        */
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 );
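The `populateNetworkBackwardAbs` network above can likewise be hand-evaluated to confirm the weights match the Fig. 2 topology. A minimal standalone sketch (illustrative only; `std::fabs` stands in for Marabou's ABSOLUTE_VALUE layer):

```cpp
#include <cmath>
#include <cstdio>

int main()
{
    double x0 = 1, x1 = 2;                                // sample input
    double x2 = x0 - x1, x3 = x0 + x1;                    // layer 1
    double x4 = std::fabs( x2 ), x5 = std::fabs( x3 );    // layer 2: Abs
    double x6 = -x4 + x5, x7 = 2 * x4 - x5;               // layer 3
    double x8 = std::fabs( x6 ), x9 = std::fabs( x7 );    // layer 4: Abs
    double x10 = -x8 - x9;                                // layer 5, first output
    double x11 = x8 + 2 * x9 + 2;                         // bias 2 on neuron ( 5, 1 )
    printf( "x10 = %g, x11 = %g\n", x10, x11 );           // expect -3 and 6
    return 0;
}
```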
-        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 );
-        nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 );
-        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 );
-        nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 7; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Set the weights and biases for the weighted sum layers
-        nlr.setWeight( 0, 0, 1, 0, -1 );
-        nlr.setWeight( 0, 0, 1, 1, 1 );
-        nlr.setWeight( 0, 0, 1, 2, 1 );
-        nlr.setWeight( 0, 0, 1, 3, 3 );
-        nlr.setWeight( 0, 1, 1, 0, 1 );
-        nlr.setWeight( 0, 1, 1, 1, 2 );
-        nlr.setWeight( 0, 1, 1, 2, 1 );
-        nlr.setWeight( 0, 1, 1, 3, -2 );
-        nlr.setWeight( 0, 2, 1, 2, 1 );
-        nlr.setWeight( 0, 2, 1, 3, -1 );
-
-        nlr.setWeight( 2, 0, 3, 0, 2 );
-        nlr.setWeight( 2, 0, 3, 1, -1 );
-        nlr.setWeight( 2, 0, 3, 2, 1 );
-        nlr.setWeight( 2, 1, 3, 1, 2 );
-        nlr.setWeight( 2, 1, 3, 2, -1 );
-        nlr.setWeight( 2, 2, 3, 0, 1 );
-        nlr.setWeight( 2, 2, 3, 2, 2 );
-        nlr.setWeight( 2, 3, 3, 0, -1 );
-        nlr.setWeight( 2, 3, 3, 1, 1 );
-
-        nlr.setWeight( 4, 0, 5, 0, -1 );
-        nlr.setWeight( 4, 0, 5, 1, 1 );
-        nlr.setWeight( 4, 1, 5, 0, 1 );
-        nlr.setWeight( 4, 1, 5, 1, 1 );
-        nlr.setWeight( 4, 2, 5, 0, -1 );
-        nlr.setWeight( 4, 2, 5, 1, 1 );
-
-        nlr.setWeight( 6, 0, 7, 0, 1 );
-        nlr.setWeight( 6, 1, 7, 0, -1 );
-
-        nlr.setBias( 3, 0, 2 );
-        nlr.setBias( 3, 2, -2 );
-        nlr.setBias( 7, 0, -1 );
-
-        // Mark the Abs sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-        nlr.addActivationSource( 1, 2, 2, 2 );
-        nlr.addActivationSource( 1, 3, 2, 3 );
-
-        nlr.addActivationSource( 3, 0, 4, 0 );
-        nlr.addActivationSource( 3, 1, 4, 1 );
-        nlr.addActivationSource( 3, 2, 4, 2 );
-
-        nlr.addActivationSource( 5, 0, 6, 0 );
-        nlr.addActivationSource( 5, 1, 6, 1 );
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 );
-
-        // Very loose bounds for neurons except inputs
-        double large = 1000000;
-
-        tableau.getBoundManager().initialize( 22 );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-    }
-
-    void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
-    {
-        /*
-
-                1      LR      -1      LR      -1      2
-          x0 --- x2 ---> x4 --- x6 ---> x8 --- x10
-            \   /          \   /          \   /
-           1 \ /          2 \ /          1 \ /
-              \/             \/             \/
-              /\             /\             /\
-          -1 / \           1 / \          -1 / \
-            /   \   LR      /   \   LR      /   \
-          x1 --- x3 ---> x5 --- x7 ---> x9 --- x11
-                1             -1              2
-
-          The example described in Fig. 2 of
-          https://dl.acm.org/doi/10.1145/3563325
-        */
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 );
-        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
-
-        nlr.getLayer( 2 )->setAlpha( 0.1 );
-        nlr.getLayer( 4 )->setAlpha( 0.1 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 5; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Set the weights and biases for the weighted sum layers
-        nlr.setWeight( 0, 0, 1, 0, 1 );
-        nlr.setWeight( 0, 0, 1, 1, 1 );
-        nlr.setWeight( 0, 1, 1, 0, -1 );
-        nlr.setWeight( 0, 1, 1, 1, 1 );
-
-        nlr.setWeight( 2, 0, 3, 0, -1 );
-        nlr.setWeight( 2, 0, 3, 1, 2 );
-        nlr.setWeight( 2, 1, 3, 0, 1 );
-        nlr.setWeight( 2, 1, 3, 1, -1 );
-
-        nlr.setWeight( 4, 0, 5, 0, -1 );
-        nlr.setWeight( 4, 0, 5, 1, 1 );
-        nlr.setWeight( 4, 1, 5, 0, -1 );
-        nlr.setWeight( 4, 1, 5, 1, 2 );
-
-        nlr.setBias( 5, 1, 2 );
-
-        // Mark the LeakyReLU sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-
-        nlr.addActivationSource( 3, 0, 4, 0 );
-        nlr.addActivationSource( 3, 1, 4, 1 );
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 );
-
-        // Very loose bounds for neurons except inputs
-        double large = 1000000;
-
-        tableau.getBoundManager().initialize( 12 );
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-    }
-
-    void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
-    {
-        /*
-                x3    x7
-          x0                x11    x14
-                x4    x8                  x17    x19
-          x1                x12    x15                  x21
-                x5    x9                  x18    x20
-          x2                x13    x16
-                x6    x10
-
-          x3 = -x0 + x1
-          x4 = x0 + 2*x1
-          x5 = x0 + x1 + x2
-          x6 = 3*x0 - 2*x1 - x2
-
-          x7 = LeakyReLU( x3 )
-          x8 = LeakyReLU( x4 )
-          x9 = LeakyReLU( x5 )
-          x10 = LeakyReLU( x6 )
-
-          x11 = 2x7 + x9 - x10 + 2
-          x12 = -x7 + 2x8 + x10
-          x13 = x7 - x8 + 2x9 - 2
-
-          x14 = LeakyReLU( x11 )
-          x15 = LeakyReLU( x12 )
-          x16 = LeakyReLU( x13 )
-
-          x17 = -x14 + x15 - x16
-          x18 = x14 + x15 + x16
-
-          x19 = LeakyReLU( x17 )
-          x20 = LeakyReLU( x18 )
-
-          x21 = x19 - x20 - 1
-        */
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 );
-        nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 );
-        nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 );
-        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 );
-        nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 );
-
-        nlr.getLayer( 2 )->setAlpha( 0.1 );
-        nlr.getLayer( 4 )->setAlpha( 0.1 );
-        nlr.getLayer( 6 )->setAlpha( 0.1 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 7; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Set the weights and biases for the weighted sum layers
-        nlr.setWeight( 0, 0, 1, 0, -1 );
-        nlr.setWeight( 0, 0, 1, 1, 1 );
-        nlr.setWeight( 0, 0, 1, 2, 1 );
-        nlr.setWeight( 0, 0, 1, 3, 3 );
-        nlr.setWeight( 0, 1, 1, 0, 1 );
-        nlr.setWeight( 0, 1, 1, 1, 2 );
-        nlr.setWeight( 0, 1, 1, 2, 1 );
-        nlr.setWeight( 0, 1, 1, 3, -2 );
-        nlr.setWeight( 0, 2, 1, 2, 1 );
-        nlr.setWeight( 0, 2, 1, 3, -1 );
-
-        nlr.setWeight( 2, 0, 3, 0, 2 );
-        nlr.setWeight( 2, 0, 3, 1, -1 );
-        nlr.setWeight( 2, 0, 3, 2, 1 );
-        nlr.setWeight( 2, 1, 3, 1, 2 );
-        nlr.setWeight( 2, 1, 3, 2, -1 );
-        nlr.setWeight( 2, 2, 3, 0, 1 );
-        nlr.setWeight( 2, 2, 3, 2, 2 );
-        nlr.setWeight( 2, 3, 3, 0, -1 );
-        nlr.setWeight( 2, 3, 3, 1, 1 );
-
-        nlr.setWeight( 4, 0, 5, 0, -1 );
-        nlr.setWeight( 4, 0, 5, 1, 1 );
-        nlr.setWeight( 4, 1, 5, 0, 1 );
-        nlr.setWeight( 4, 1, 5, 1, 1 );
-        nlr.setWeight( 4, 2, 5, 0, -1 );
-        nlr.setWeight( 4, 2, 5, 1, 1 );
-
-        nlr.setWeight( 6, 0, 7, 0, 1 );
-        nlr.setWeight( 6, 1, 7, 0, -1 );
-
-        nlr.setBias( 3, 0, 2 );
-        nlr.setBias( 3, 2, -2 );
-        nlr.setBias( 7, 0, -1 );
-
-        // Mark the LeakyReLU sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-        nlr.addActivationSource( 1, 2, 2, 2 );
-        nlr.addActivationSource( 1, 3, 2, 3 );
-
-        nlr.addActivationSource( 3, 0, 4, 0 );
-        nlr.addActivationSource( 3, 1, 4, 1 );
-        nlr.addActivationSource( 3, 2, 4, 2 );
-
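The `setAlpha( 0.1 )` calls above configure the leak coefficient of these layers. Under the standard definition, which these tests appear to assume, the activation is simply identity on the non-negative side and a scaled leak otherwise; a minimal sketch (illustrative only, not Marabou's implementation):

```cpp
// Standard LeakyReLU with the alpha used in these tests (assumption: Marabou's
// LEAKY_RELU layer matches this definition for alpha = 0.1).
double leakyRelu( double x, double alpha = 0.1 )
{
    return x >= 0 ? x : alpha * x;
}
// leakyRelu( 2.0 )  == 2.0
// leakyRelu( -2.0 ) == -0.2
```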
-        nlr.addActivationSource( 5, 0, 6, 0 );
-        nlr.addActivationSource( 5, 1, 6, 1 );
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 );
-
-        // Very loose bounds for neurons except inputs
-        double large = 1000000;
-
-        tableau.getBoundManager().initialize( 22 );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-    }
-
-    void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr,
-                                               MockTableau &tableau )
-    {
-        /*
-                a     a'
-          x                 e
-                b     b'          f     h
-          y                 d
-                c     c'
-        */
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
-        nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 4, NLR::Layer::MAX, 1 );
-        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 5; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Set the weights and biases for the weighted sum layers
-        nlr.setWeight( 0, 0, 1, 0, 1 );
-        nlr.setWeight( 0, 0, 1, 1, 2 );
-        nlr.setWeight( 0, 1, 1, 1, -3 );
-        nlr.setWeight( 0, 1, 1, 2, 1 );
-
-        nlr.setWeight( 2, 0, 3, 0, 1 );
-        nlr.setWeight( 2, 0, 3, 1, -1 );
-        nlr.setWeight( 2, 1, 3, 0, 1 );
-        nlr.setWeight( 2, 1, 3, 1, 1 );
-        nlr.setWeight( 2, 2, 3, 0, -1 );
-        nlr.setWeight( 2, 2, 3, 1, -1 );
-
-        nlr.setWeight( 4, 0, 5, 0, -1 );
-
-        nlr.setBias( 1, 0, 1 );
-        nlr.setBias( 3, 1, 2 );
-
-        // Mark the Softmax/Max sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 0, 2, 1 );
-        nlr.addActivationSource( 1, 0, 2, 2 );
-        nlr.addActivationSource( 1, 1, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-        nlr.addActivationSource( 1, 1, 2, 2 );
-        nlr.addActivationSource( 1, 2, 2, 0 );
-        nlr.addActivationSource( 1, 2, 2, 1 );
-        nlr.addActivationSource( 1, 2, 2, 2 );
-
-        nlr.addActivationSource( 3, 0, 4, 0 );
-        nlr.addActivationSource( 3, 1, 4, 0 );
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 );
-
-        // Very loose bounds for neurons except inputs
-        double large = 1000000;
-
-        tableau.getBoundManager().initialize( 12 );
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-    }
-
-
-    void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr,
-                                                MockTableau &tableau )
-    {
-        /*
-                x3    x7
-          x0                x11    x14
-                x4    x8
-          x1                x12    x15    x17
-                x5    x9
-          x2                x13    x16
-                x6    x10
-
-          x3 = -x0 + x1
-          x4 = x0 + 2*x1
-          x5 = x0 + x1 + x2
-          x6 = 3*x0 - 2*x1 - x2
-
-          x7, x8, x9, x10 = Softmax( x2, x3, x4, x5 )
-
-          x11 = 2x7 + x9 - x10 + 2
-          x12 = -x7 + 2x8 + x10
-          x13 = x7 - x8 + 2x9 - 2
-
-          x14, x15, x16 = Softmax( x11, x12, x13 )
-
-          x17 = Max( x14, x15, x16 )
-        */
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 );
-        nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 );
-        nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 );
-        nlr.addLayer( 5, NLR::Layer::MAX, 1 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 5; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Set the weights and biases for the weighted sum layers
-        nlr.setWeight( 0, 0, 1, 0, -1 );
-        nlr.setWeight( 0, 0, 1, 1, 1 );
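Note that, unlike the elementwise activations, each Softmax output in these helpers registers every neuron of its source layer via `addActivationSource`: softmax is a vector-to-vector function, so each output depends on the whole input row. A minimal sketch of the function itself (illustrative only, not Marabou's implementation; subtracting the maximum is the usual overflow guard and does not change the result):

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// softmax_i( x ) = exp( x_i ) / sum_j exp( x_j )
std::vector<double> softmax( const std::vector<double> &x )
{
    double maxElement = x[0];
    for ( double xi : x )
        maxElement = std::max( maxElement, xi );

    double sum = 0;
    std::vector<double> result( x.size() );
    for ( std::size_t i = 0; i < x.size(); ++i )
    {
        result[i] = std::exp( x[i] - maxElement );
        sum += result[i];
    }
    for ( double &ri : result )
        ri /= sum;
    return result;
}
```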
-        nlr.setWeight( 0, 0, 1, 2, 1 );
-        nlr.setWeight( 0, 0, 1, 3, 3 );
-        nlr.setWeight( 0, 1, 1, 0, 1 );
-        nlr.setWeight( 0, 1, 1, 1, 2 );
-        nlr.setWeight( 0, 1, 1, 2, 1 );
-        nlr.setWeight( 0, 1, 1, 3, -2 );
-        nlr.setWeight( 0, 2, 1, 2, 1 );
-        nlr.setWeight( 0, 2, 1, 3, -1 );
-
-        nlr.setWeight( 2, 0, 3, 0, 2 );
-        nlr.setWeight( 2, 0, 3, 1, -1 );
-        nlr.setWeight( 2, 0, 3, 2, 1 );
-        nlr.setWeight( 2, 1, 3, 1, 2 );
-        nlr.setWeight( 2, 1, 3, 2, -1 );
-        nlr.setWeight( 2, 2, 3, 0, 1 );
-        nlr.setWeight( 2, 2, 3, 2, 2 );
-        nlr.setWeight( 2, 3, 3, 0, -1 );
-        nlr.setWeight( 2, 3, 3, 1, 1 );
-
-        nlr.setBias( 3, 0, 2 );
-        nlr.setBias( 3, 2, -2 );
-
-        // Mark the Softmax/Max sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 0 );
-        nlr.addActivationSource( 1, 2, 2, 0 );
-        nlr.addActivationSource( 1, 3, 2, 0 );
-        nlr.addActivationSource( 1, 0, 2, 1 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-        nlr.addActivationSource( 1, 2, 2, 1 );
-        nlr.addActivationSource( 1, 3, 2, 1 );
-        nlr.addActivationSource( 1, 0, 2, 2 );
-        nlr.addActivationSource( 1, 1, 2, 2 );
-        nlr.addActivationSource( 1, 2, 2, 2 );
-        nlr.addActivationSource( 1, 3, 2, 2 );
-        nlr.addActivationSource( 1, 0, 2, 3 );
-        nlr.addActivationSource( 1, 1, 2, 3 );
-        nlr.addActivationSource( 1, 2, 2, 3 );
-        nlr.addActivationSource( 1, 3, 2, 3 );
-
-        nlr.addActivationSource( 3, 0, 4, 0 );
-        nlr.addActivationSource( 3, 1, 4, 0 );
-        nlr.addActivationSource( 3, 2, 4, 0 );
-        nlr.addActivationSource( 3, 0, 4, 1 );
-        nlr.addActivationSource( 3, 1, 4, 1 );
-        nlr.addActivationSource( 3, 2, 4, 1 );
-        nlr.addActivationSource( 3, 0, 4, 2 );
-        nlr.addActivationSource( 3, 1, 4, 2 );
-        nlr.addActivationSource( 3, 2, 4, 2 );
-
-        nlr.addActivationSource( 4, 0, 5, 0 );
-        nlr.addActivationSource( 4, 1, 5, 0 );
-        nlr.addActivationSource( 4, 2, 5, 0 );
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 );
-
-        // Very loose bounds for neurons except inputs
-        double large = 1000000;
-
-        tableau.getBoundManager().initialize( 18 );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-    }
-
-    void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr,
-                                                 MockTableau &tableau )
-    {
-        /*
-                a     a'
-          x                 e
-                b     b'          f     h
-          y                 d
-                c     c'
-        */
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
-        nlr.addLayer( 2, NLR::Layer::RELU, 3 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 );
-        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 5; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Set the weights and biases for the weighted sum layers
-        nlr.setWeight( 0, 0, 1, 0, 1 );
-        nlr.setWeight( 0, 0, 1, 1, 2 );
-        nlr.setWeight( 0, 1, 1, 1, -3 );
-        nlr.setWeight( 0, 1, 1, 2, 1 );
-
-        nlr.setWeight( 2, 0, 3, 0, 1 );
-        nlr.setWeight( 2, 0, 3, 1, -1 );
-        nlr.setWeight( 2, 1, 3, 0, 1 );
-        nlr.setWeight( 2, 1, 3, 1, 1 );
-        nlr.setWeight( 2, 2, 3, 0, -1 );
-        nlr.setWeight( 2, 2, 3, 1, -1 );
-
-        nlr.setWeight( 4, 0, 5, 0, -1 );
-
-        nlr.setBias( 1, 0, 1 );
-        nlr.setBias( 3, 1, 2 );
-
-        // Mark the ReLU/Bilinear sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-        nlr.addActivationSource( 1, 2, 2, 2 );
-
-        nlr.addActivationSource( 3, 0, 4, 0 );
-        nlr.addActivationSource( 3, 1, 4, 0 );
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 );
-
-        // Very loose bounds for neurons except inputs
-        double large = 1000000;
-
-        tableau.getBoundManager().initialize( 12 );
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-    }
-
-    void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr,
-                                                  MockTableau &tableau )
-    {
-        /*
-                x3    x7
-          x0
-                x4    x8    x11    x13
-          x1                              x15
-                x5    x9    x12    x14
-          x2
-                x6    x10
-
-          x3 = -x0 + x1
-          x4 = x0 + 2*x1
-          x5 = x0 + x1 + x2
-          x6 = 3*x0 - 2*x1 - x2
-
-          x7 = ReLU( x3 )
-          x8 = ReLU( x4 )
-          x9 = ReLU( x5 )
-          x10 = ReLU( x6 )
-
-          x11 = 2x7 + x9 - x10 + 2
-          x12 = -x7 + 2x8 + x10
-
-          x13 = ReLU( x11 )
-          x14 = ReLU( x12 )
-
-          x15 = x13 * x14
-        */
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 );
-        nlr.addLayer( 2, NLR::Layer::RELU, 4 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 4, NLR::Layer::RELU, 2 );
-        nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 5; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Set the weights and biases for the weighted sum layers
-        nlr.setWeight( 0, 0, 1, 0, -1 );
-        nlr.setWeight( 0, 0, 1, 1, 1 );
-        nlr.setWeight( 0, 0, 1, 2, 1 );
-        nlr.setWeight( 0, 0, 1, 3, 3 );
-        nlr.setWeight( 0, 1, 1, 0, 1 );
-        nlr.setWeight( 0, 1, 1, 1, 2 );
-        nlr.setWeight( 0, 1, 1, 2, 1 );
-        nlr.setWeight( 0, 1, 1, 3, -2 );
-        nlr.setWeight( 0, 2, 1, 2, 1 );
-        nlr.setWeight( 0, 2, 1, 3, -1 );
-
-        nlr.setWeight( 2, 0, 3, 0, 2 );
-        nlr.setWeight( 2, 0, 3, 1, -1 );
-        nlr.setWeight( 2, 1, 3, 1, 2 );
-        nlr.setWeight( 2, 2, 3, 0, 1 );
-        nlr.setWeight( 2, 3, 3, 0, -1 );
-        nlr.setWeight( 2, 3, 3, 1, 1 );
-
-        nlr.setBias( 3, 0, 2 );
-
-        // Mark the ReLU/Bilinear sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-        nlr.addActivationSource( 1, 2, 2, 2 );
-        nlr.addActivationSource( 1, 3, 2, 3 );
-
-        nlr.addActivationSource( 3, 0, 4, 0 );
-        nlr.addActivationSource( 3, 1, 4, 1 );
-
-        nlr.addActivationSource( 4, 0, 5, 0 );
-        nlr.addActivationSource( 4, 1, 5, 0 );
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 );
-
-        // Very loose bounds for neurons except inputs
-        double large = 1000000;
-
-        tableau.getBoundManager().initialize( 16 );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-    }
-
-    void test_evaluate_relu()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetwork( nlr );
-
-        double input[2];
-        double output[2];
-
-        // With ReLUs, Inputs are zeros, only biases count
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );
-
-        // With ReLUs, case 1
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) );
-
-        // With ReLUs, case 2
-        input[0] = 1;
-        input[1] = 2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) );
-    }
-
-    void test_evaluate_sigmoids()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithSigmoids( nlr );
-
-        double input[2];
-        double output[2];
-
-        // case 1
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) );
-
-        // case 2
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) );
-
-        // case 3
-        input[0] = 1;
-        input[1] = 2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) );
-    }
-
-    void test_evaluate_abs()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithAbs( nlr );
-
-        double input[2];
-        double output[2];
-
-        // With Abs, Inputs are zeros, only biases count
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );
-
-        // With Abs, case 1
-        input[0] = -2;
-        input[1] = -2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );
-
-        // With Abs, case 2
-        input[0] = 1;
-        input[1] = 2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) );
-    }
-
-    void test_evaluate_sign()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithSign( nlr );
-
-        double input[2];
-        double output[2];
-
-        // With Sign, Inputs are zeros, only biases count
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );
-
-        // With Sign, case 1
-        input[0] = -2;
-        input[1] = -2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );
-
-        // With Sign, case 2 (0 considered "non-negative", sign(0)=1)
-        input[0] = -1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) );
-    }
-
-    void test_evaluate_round()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithRound( nlr );
-
-        double input[2];
-        double output[2];
-
-        // With Round, Inputs are zeros, only biases count
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );
-
-        // With Round, case 1
-        input[0] = 2.1;
-        input[1] = 1.4;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) );
-
-        // With Round, case 2
-        input[0] = 2.1;
-        input[1] = 1.6;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) );
-    }
-
-    void test_evaluate_leaky_relu()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithLeakyRelu( nlr );
-
-        double input[2];
-        double output[2];
-
-        // With Leaky ReLU, Inputs are zeros, only biases count
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );
-
-        // With Leaky ReLU, case 1 (alpha=0.1)
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) );
-
-        // With Leaky ReLU, case 2
-        input[0] = 1;
-        input[1] = 2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) );
-    }
-
-    void test_evaluate_max()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithMax( nlr );
-
-        double input[2];
-        double output[1];
-
-        // With Max, Inputs are zeros, only biases count
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) );
-
-        // With Max, case 1
-        input[0] = 1;
-        input[1] = -3;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) );
-
-        // With Max, case 2
-        input[0] = -3;
-        input[1] = 3;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) );
-    }
-
-
-    void test_evaluate_softmax()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithSoftmax( nlr );
-
-        double input[2];
-        double output[2];
-
-        // With Softmax, Inputs are zeros, only biases count
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) );
-
-        // With Softmax, case 1
-        input[0] = 1;
-        input[1] = -3;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) );
-
-        // With Softmax, case 2
-        input[0] = -3;
-        input[1] = 3;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) );
-    }
-
-    void test_evaluate_bilinear()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithBilinear( nlr );
-
-        double input[2];
-        double output[1];
-
-        // With Bilinear, Inputs are zeros, only biases count
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) );
-
-        // With Bilinear, case 1
-        input[0] = 0.1;
-        input[1] = -0.3;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-        TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) );
-
-        // With Bilinear, case 2
-        input[0] = -0.3;
-        input[1] = 0.3;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) );
-    }
-
-    void test_evaluate_non_consecutive_layers()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
-        nlr.addLayer( 2, NLR::Layer::RELU, 3 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 4, NLR::Layer::RELU, 3 );
-        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 );
-
-        // Mark layer dependencies
-        nlr.addLayerDependency( 0, 1 );
-        nlr.addLayerDependency( 1, 2 );
-        nlr.addLayerDependency( 2, 3 );
-        nlr.addLayerDependency( 0, 3 );
-        nlr.addLayerDependency( 3, 4 );
-        nlr.addLayerDependency( 0, 4 );
-        nlr.addLayerDependency( 4, 5 );
-
-        // Set the weights and relus
-        nlr.setWeight( 0, 0, 1, 0, 1 );
-        nlr.setWeight( 0, 0, 1, 1, 2 );
-        nlr.setWeight( 0, 1, 1, 1, -3 );
-        nlr.setWeight( 0, 1, 1, 2, 1 );
-
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-        nlr.addActivationSource( 1, 2, 2, 2 );
-
-        nlr.setWeight( 2, 0, 3, 0, 1 );
-        nlr.setWeight( 2, 1, 3, 0, 2 );
-        nlr.setWeight( 2, 2, 3, 1, -2 );
-        nlr.setWeight( 0, 1, 3, 1, 1 );
-
-        nlr.addActivationSource( 3, 0, 4, 0 );
-        nlr.addActivationSource( 3, 1, 4, 1 );
-        nlr.addActivationSource( 0, 0, 4, 2 );
-
-        nlr.setWeight( 4, 0, 5, 0, 1 );
-        nlr.setWeight( 4, 1, 5, 0, 1 );
-        nlr.setWeight( 4, 2, 5, 0, 1 );
-
-        // Evaluate
-        double input[2];
-        double output;
-
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) );
-        TS_ASSERT( FloatUtils::areEqual( output, 2 ) );
-
-        input[0] = -1;
-        input[1] = 2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) );
-        TS_ASSERT( FloatUtils::areEqual( output, 0 ) );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() );
-
-        TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set<unsigned>( { 1, 3, 4 } ) );
-        TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set<unsigned>( { 2 } ) );
-        TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set<unsigned>( { 3 } ) );
-        TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set<unsigned>( { 4 } ) );
-        TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set<unsigned>( { 5 } ) );
-    }
-
-    void test_evaluate_abs_and_relu()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithAbsAndRelu( nlr );
-
-        double input[2];
-        double output[2];
-
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) );
-
-        input[0] = 1;
-        input[1] = 2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );
-    }
-
-    void test_evaluate_round_and_sign()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithRoundAndSign( nlr );
-
-        double input[2];
-        double output[2];
-
-        input[0] = 1.6;
-        input[1] = 1.4;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) );
-
-        input[0] = 1.6;
-        input[1] = 1.6;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) );
-    }
-
-    void test_evaluate_leaky_relu_and_sigmoid()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithLeakyReluAndSigmoid( nlr );
-
-        double input[2];
-        double output[2];
-
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) );
-
-        input[0] = 1;
-        input[1] = 2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) );
-    }
-
-    void test_evaluate_softmax_and_max()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithSoftmaxAndMax( nlr );
-
-        double input[2];
-        double output[1];
-
-        input[0] = 1;
-        input[1] = -3;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) );
-
-        input[0] = -3;
-        input[1] = 3;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) );
-    }
-
-    void test_evaluate_relu_and_bilinear()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithReluAndBilinear( nlr );
-
-        double input[2];
-        double output[1];
-
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
-
-        input[0] = 1;
-        input[1] = 2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) );
-    }
-
-    void test_store_into_other()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetwork( nlr );
-
-        NLR::NetworkLevelReasoner nlr2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );
-
-        double input[2];
-        double output1[2];
-        double output2[2];
-
-        // Inputs are zeros, only biases count
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );
-
-        // With ReLUs, Inputs are zeros, only biases count
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-
-        // With ReLUs, case 1
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-    }
-
-    void test_store_into_other_with_sigmoids()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithSigmoids( nlr );
-
-        NLR::NetworkLevelReasoner nlr2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );
-
-        double input[2];
-        double output1[2];
-        double output2[2];
-
-        // case 1
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-
-        // case 2
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-    }
-
-    void test_store_into_other_with_round()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithRound( nlr );
-
-        NLR::NetworkLevelReasoner nlr2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );
-
-        double input[2];
-        double output1[2];
-        double output2[2];
-
-        // case 1
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-
-        // case 2
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-    }
-
-    void test_store_into_other_with_sign()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithSign( nlr );
-
-        NLR::NetworkLevelReasoner nlr2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );
-
-        double input[2];
-        double output1[2];
-        double output2[2];
-
-        // case 1
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-
-        // case 2
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-    }
-
-    void test_store_into_other_with_abs()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithAbs( nlr );
-
-        NLR::NetworkLevelReasoner nlr2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );
-
-        double input[2];
-        double output1[2];
-        double output2[2];
-
-        // case 1
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-
-        // case 2
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-    }
-
-    void test_store_into_other_with_leaky_relu()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithLeakyRelu( nlr );
-
-        NLR::NetworkLevelReasoner nlr2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );
-
-        double input[2];
-        double output1[2];
-        double output2[2];
-
-        // case 1
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-
-        // case 2
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-    }
-
-    void test_store_into_other_with_max()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithMax( nlr );
-
-        NLR::NetworkLevelReasoner nlr2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );
-
-        double input[2];
-        double output1[2];
-        double output2[2];
-
-        // case 1
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-
-        // case 2
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-    }
-
-    void test_store_into_other_with_softmax()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithSoftmax( nlr );
-
-        NLR::NetworkLevelReasoner nlr2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );
-
-        double input[2];
-        double output1[2];
-        double output2[2];
-
-        // case 1
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-
-        // case 2
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-    }
-
-    void test_store_into_other_with_bilinear()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithBilinear( nlr );
-
-        NLR::NetworkLevelReasoner nlr2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );
-
-        double input[2];
-        double output1[2];
-        double output2[2];
-
-        // case 1
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-
-        // case 2
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
-        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
-        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
-    }
-
-    void test_generate_input_query()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
-        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 4, NLR::Layer::RELU, 2 );
-        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 5; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Variable indexing
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 );
-
-        // Set the weights and biases for the weighted sum layers
-
-        nlr.setWeight( 0, 0, 1, 0, 1 );
-        nlr.setWeight( 0, 0, 1, 1, 2 );
-        nlr.setWeight( 0, 1, 1, 1, -3 );
-        nlr.setWeight( 0, 1, 1, 2, 1 );
-
-        nlr.setWeight( 2, 0, 3, 0, 1 );
-        nlr.setWeight( 2, 0, 3, 1, -1 );
-        nlr.setWeight( 2, 1, 3, 0, 1 );
-        nlr.setWeight( 2, 1, 3, 1, 1 );
-        nlr.setWeight( 2, 2, 3, 0, -1 );
-        nlr.setWeight( 2, 2, 3, 1, -5 );
-
-        nlr.setWeight( 4, 0, 5, 0, 1 );
-        nlr.setWeight( 4, 0, 5, 1, 1 );
-        nlr.setWeight( 4, 1, 5, 1, 3 );
-
-        nlr.setBias( 1, 0, 1 );
-        nlr.setBias( 1, 1, 0 );
-        nlr.setBias( 1, 2, 0 );
-
-        nlr.setBias( 3, 0, 0 );
-        nlr.setBias( 3, 1, 2 );
-
-        nlr.setBias( 5, 0, 0 );
-        nlr.setBias( 5, 1, 0 );
-
-        // Mark the ReLU/Abs sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-        nlr.addActivationSource( 1, 2, 2, 2 );
-
-        nlr.addActivationSource( 3, 0, 4, 0 );
-        nlr.addActivationSource( 3, 1, 4, 1 );
-
-        // Start the testing
-        Query ipq;
-        nlr.generateQuery( ipq );
-        List<Equation> unhandledEquations;
-        Set<unsigned> varsInUnhandledConstraints;
-        TS_ASSERT(
-            ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) );
-        NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner();
-
-        double input[2];
-        double output[2];
-
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) );
-
-        input[0] = 1;
-        input[1] = 2;
-
-        TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );
-    }
-
-    void test_simulate_relu()
-    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetwork( nlr );
-
-        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
-
-        // With ReLUs, Inputs are zeros, only biases count
-        Vector<Vector<double>> simulations1;
-        simulations1.append( Vector<double>( simulationSize, 0 ) );
-        simulations1.append( Vector<double>( simulationSize, 0 ) );
-
TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With ReLUs, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1 ) ); - } - - // With ReLUs, case 1 and 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0 ) ); - } - } - - void test_simulate_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6750, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 3.0167, - 0.0001 ) ); - } - - // case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6032, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.5790, - 0.0001 ) ); - } - - // case 3 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.5045, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.1957, - 0.0001 ) ); - } - } - - void test_simulate_round() - { - 
NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRound( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Round, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Round, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 2.1 ) ); - simulations2.append( Vector( simulationSize, 1.4 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4, - 0.0001 ) ); - } - - // With Round, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 2.1 ) ); - simulations3.append( Vector( simulationSize, 1.6 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -12, - 0.0001 ) ); - } - } - - void test_simulate_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSign( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Sign, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Sign, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -2 ) ); - simulations2.append( Vector( simulationSize, -2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Sign, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -1 ) ); - simulations3.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; 
++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4 ) ); - } - } - - void test_simulate_abs() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Abs, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Abs, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -2 ) ); - simulations2.append( Vector( simulationSize, -2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Abs, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 10 ) ); - } - } - - void test_simulate_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Leaky ReLU, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Leaky ReLU, case 1 (alpha=0.1) - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.9, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 
)->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0.57, - 0.0001 ) ); - } - - // With Leaky ReLU, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -0.04, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -0.76, - 0.0001 ) ); - } - } - - void test_simulate_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Max, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -3 ) ); - } - - // With Max, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, -3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -18 ) ); - } - - // With Max, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -3 ) ); - simulations3.append( Vector( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -5 ) ); - } - } - - void test_simulate_softmax() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Softmax, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.2999, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.4001, - 0.0001 ) ); - } - - // With Softmax, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, -3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.1192, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - 
.get( i ), - 2.7615, - 0.0001 ) ); - } - - // With Softmax, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -3 ) ); - simulations3.append( Vector( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.1206, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.7588, - 0.0001 ) ); - } - } - - void test_simulate_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithBilinear( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Bilinear, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } - - // With Bilinear, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 0.1 ) ); - simulations2.append( Vector( simulationSize, -0.3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2.8304, - 0.0001 ) ); - } - - // With Bilinear, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -0.3 ) ); - simulations3.append( Vector( simulationSize, 0.3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.0912, - 0.0001 ) ); - } - } - - void test_simulate_non_consecutive_layers() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Set the weights and relus - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - unsigned simulationSize = 
Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // Simulate1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - - // Simulate2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } - - void test_simulate_abs_and_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbsAndRelu( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // Simulate1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2 ) ); - } - - // Simulate2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - } - - void test_simulate_round_and_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRoundAndSign( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Round/Sign, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1.6 ) ); - simulations1.append( Vector( simulationSize, 1.4 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -2 ) ); - } - - // With Round/Sign, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1.6 ) ); - simulations2.append( Vector( simulationSize, 1.6 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4 ) ); - 
} - } - - void test_simulate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyReluAndSigmoid( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With LeakyReLU/Sigmoid, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.7109, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1.4602, - 0.0001 ) ); - } - - // With LeakyReLU/Sigmoid, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.4013, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0.6508, - 0.0001 ) ); - } - } - - void test_simulate_softmax_and_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmaxAndMax( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With LeakyReLU/Sigmoid, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, -3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -2.9998, - 0.0001 ) ); - } - - // With LeakyReLU/Sigmoid, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -3 ) ); - simulations2.append( Vector( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1, - 0.0001 ) ); - } - } - - void test_simulate_relu_and_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithReluAndBilinear( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Relu/Bilinear, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - } - - // With ReLU/Bilinear, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( 
nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } - } - - void test_interval_arithmetic_bound_propagation_relu_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - 
tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_abs_constraints() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Layer dependenices - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 
8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - 
tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_sign_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSign( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 
1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, 3 ); - tableau.setUpperBound( 0, 4 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - - Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyRelu( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 
4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - - Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), - Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), - - Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), - Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), 
Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - - Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), - Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), - - Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), - Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_round_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, 1.4 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, -1.4 ); - tableau.setUpperBound( 1, 2.1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the 
current bounds - tableau.setLowerBound( 0, -3.1 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - - Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), - - Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_sigmoid_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - 
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-
-        nlr.setTableau( &tableau );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ),
-            Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ),
-            Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ),
-
-            Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ),
-            Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ),
-
-            Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ),
-            Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ),
-
-            Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ),
-            Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ),
-            Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ),
-
-            Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ),
-            Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ),
-
-            Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ),
-            Tightening( 11, 0.5593, Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ),
-
-            Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ),
-            Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-    }
-
-    void test_interval_arithmetic_bound_propagation_max_constraints()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetworkWithMax( nlr );
-
-
-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 12 );
-
-        // Initialize the bounds
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        double large = 1000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        nlr.setTableau( &tableau );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ),
-            Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ),
-
-            Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ),
-            Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ),
-            Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
-
-            Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ),
-
-            Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-    }
-
-    void test_interval_arithmetic_bound_propagation_softmax_constraints()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetworkWithSoftmax( nlr );
-
-
-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 14 );
-
-        // Initialize the bounds
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        double large = 1000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-
-        nlr.setTableau( &tableau );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ),
-            Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ),
-            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ),
-
-            Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ),
-            Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ),
-
-            Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ),
-            Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ),
-
-            Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ),
-            Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ),
-            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ),
-
-            Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ),
-            Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ),
-
-            Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ),
-            Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ),
-
-            Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ),
-            Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-    }
-
-    void test_interval_arithmetic_bound_propagation_bilinear_constraints()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetworkWithBilinear( nlr );
-
-
-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 12 );
-
-        // Initialize the bounds
-        tableau.setLowerBound( 0, -0.1 );
-        tableau.setUpperBound( 0, 0.1 );
-        tableau.setLowerBound( 1, -0.1 );
-        tableau.setUpperBound( 1, 0.1 );
-
-        double large = 1000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        nlr.setTableau( &tableau );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ),
-            Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ),
-            Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ),
-            Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ),
-
-            Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ),
-            Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ),
-
-            Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ),
-            Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ),
-
-            Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ),
-
-            Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -0.3 );
-        tableau.setUpperBound( 0, 0.1 );
-        tableau.setLowerBound( 1, -0.1 );
-        tableau.setUpperBound( 1, 0.2 );
-
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ),
-            Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ),
-            Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ),
-            Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ),
-
-            Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ),
-            Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ),
-
-            Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ),
-            Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ),
-
-            Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ),
-
-            Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-    }
-
-    void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetworkWithAbsAndRelu( nlr );
-
-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 14 );
-
-        // Initialize the bounds
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        double large = 1000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-
-        nlr.setTableau( &tableau );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
-
-            Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-            Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
-
-            Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
-            Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ),
-
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ),
-
-            Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ),
-            Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ),
-
-            Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ),
-            Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-    }
-
-    void test_interval_arithmetic_bound_propagation_round_and_sign_constraints()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetworkWithRoundAndSign( nlr );
-
-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 14 );
-
-        // Initialize the bounds
-        tableau.setLowerBound( 0, 1.4 );
-        tableau.setUpperBound( 0, 1.6 );
-        tableau.setLowerBound( 1, -1.4 );
-        tableau.setUpperBound( 1, 2.1 );
-
-        double large = 1000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-
-        nlr.setTableau( &tableau );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ),
-            Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ),
-            Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ),
-
-            Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ),
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ),
-            Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ),
-
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ),
-
-            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ),
-            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3.1 );
-        tableau.setUpperBound( 0, 1.6 );
-        tableau.setLowerBound( 1, 1.4 );
-        tableau.setUpperBound( 1, 2.1 );
-
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ),
-            Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ),
-            Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ),
-
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-            Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ),
-            Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ),
-
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ),
-
-            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ),
-            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-    }
-
-    void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetworkWithLeakyReluAndSigmoid( nlr );
-
-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 14 );
-
-        // Initialize the bounds
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 2 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-
-        nlr.setTableau( &tableau );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ),
-            Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-            Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ),
-            Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ),
-            Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ),
-
-            Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ),
-            Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ),
-
-            Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ),
-            Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ),
-            Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ),
-
-            Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ),
-            Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ),
-
-            Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ),
-            Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-    }
-
-    void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetworkWithSoftmaxAndMax( nlr );
-
-
-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 12 );
-
-        // Initialize the bounds
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        double large = 1000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        nlr.setTableau( &tableau );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds( { Tightening( 2, 0, Tightening::LB ),
-                                           Tightening( 2, 2, Tightening::UB ),
-                                           Tightening( 4, -5, Tightening::LB ),
-                                           Tightening( 4, 5, Tightening::UB ),
-                                           Tightening( 6, -1, Tightening::LB ),
-                                           Tightening( 6, 1, Tightening::UB ),
-
-                                           Tightening( 3, 0.0066, Tightening::LB ),
-                                           Tightening( 3, 0.9517, Tightening::UB ),
-                                           Tightening( 5, 0.0007, Tightening::LB ),
-                                           Tightening( 5, 0.9909, Tightening::UB ),
-                                           Tightening( 7, 0.0024, Tightening::LB ),
-                                           Tightening( 7, 0.7297, Tightening::UB ),
-
-                                           Tightening( 8, -0.7225, Tightening::LB ),
-                                           Tightening( 8, 1.9403, Tightening::UB ),
-                                           Tightening( 9, 0.3192, Tightening::LB ),
-                                           Tightening( 9, 2.9819, Tightening::UB ),
-
-                                           Tightening( 10, 0.3192, Tightening::LB ),
-                                           Tightening( 10, 2.9819, Tightening::UB ),
-
-                                           Tightening( 11, -2.9819, Tightening::LB ),
-                                           Tightening( 11, -0.3192, Tightening::UB ) } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds2( { Tightening( 2, -2, Tightening::LB ),
-                                            Tightening( 2, 2, Tightening::UB ),
-                                            Tightening( 4, -12, Tightening::LB ),
-                                            Tightening( 4, 5, Tightening::UB ),
-                                            Tightening( 6, -1, Tightening::LB ),
-                                            Tightening( 6, 2, Tightening::UB ),
-
-                                            Tightening( 3, 0.0009, Tightening::LB ),
-                                            Tightening( 3, 0.9526, Tightening::UB ),
-                                            Tightening( 5, 0, Tightening::LB ),
-                                            Tightening( 5, 0.9966, Tightening::UB ),
-                                            Tightening( 7, 0.0024, Tightening::LB ),
-                                            Tightening( 7, 0.9820, Tightening::UB ),
-
-                                            Tightening( 8, -0.9811, Tightening::LB ),
-                                            Tightening( 8, 1.9468, Tightening::UB ),
-                                            Tightening( 9, 0.0654, Tightening::LB ),
-                                            Tightening( 9, 2.9933, Tightening::UB ),
-
-                                            Tightening( 10, 0.0654, Tightening::LB ),
-                                            Tightening( 10, 2.9933, Tightening::UB ),
-
-                                            Tightening( 11, -2.9933, Tightening::LB ),
-                                            Tightening( 11, -0.0654, Tightening::UB ) } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-    }
-
-    void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetworkWithReluAndBilinear( nlr );
-
-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 12 );
-
-        // Initialize the bounds
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        double large = 1000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        nlr.setTableau( &tableau );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
-
-            Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ),
-
-            Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
-            Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
-
-            Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ),
-
-            Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-    }
-
-    void test_sbt_relus_all_active()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTRelu( nlr, tableau );
-
-        tableau.setLowerBound( 0, 4 );
-        tableau.setUpperBound( 0, 6 );
-        tableau.setLowerBound( 1, 1 );
-        tableau.setUpperBound( 1, 5 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [4, 6]
-          x1: [1, 5]
-
-          Layer 1:
-
-          x2.lb = 2x0 + 3x1 : [11, 27]
-          x2.ub = 2x0 + 3x1 : [11, 27]
-
-          x3.lb = x0 + x1 : [5, 11]
-          x3.ub = x0 + x1 : [5, 11]
-
-          Both ReLUs active, bounds survive through activations:
-
-          x4.lb = 2x0 + 3x1 : [11, 27]
-          x4.ub = 2x0 + 3x1 : [11, 27]
-
-          x5.lb = x0 + x1 : [5, 11]
-          x5.ub = x0 + x1 : [5, 11]
-
-          Layer 2:
-
-          x6.lb = x0 + 2x1 : [6, 16]
-          x6.ub = x0 + 2x1 : [6, 16]
-        */
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 11, Tightening::LB ),
-            Tightening( 2, 27, Tightening::UB ),
-            Tightening( 3, 5, Tightening::LB ),
-            Tightening( 3, 11, Tightening::UB ),
-
-            Tightening( 4, 11, Tightening::LB ),
-            Tightening( 4, 27, Tightening::UB ),
-            Tightening( 5, 5, Tightening::LB ),
-            Tightening( 5, 11, Tightening::UB ),
-
-            Tightening( 6, 6, Tightening::LB ),
-            Tightening( 6, 16, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_sbt_relus_active_and_inactive()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTRelu( nlr, tableau );
-
-        tableau.setLowerBound( 0, 4 );
-        tableau.setUpperBound( 0, 6 );
-        tableau.setLowerBound( 1, 1 );
-        tableau.setUpperBound( 1, 5 );
-
-        // Strong negative bias for x2, which is node (1,0)
-        nlr.setBias( 1, 0, -30 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [4, 6]
-          x1: [1, 5]
-
-          Layer 1:
-
-          x2.lb = 2x0 + 3x1 - 30 : [-19, -3]
-          x2.ub = 2x0 + 3x1 - 30 : [-19, -3]
-
-          x3.lb = x0 + x1 : [5, 11]
-          x3.ub = x0 + x1 : [5, 11]
-
-          First ReLU is inactive, bounds get zeroed
-          Second ReLU is active, bounds survive the activation
-
-          x4.lb = 0
-          x4.ub = 0
-
-          x5.lb = x0 + x1 : [5, 11]
-          x5.ub = x0 + x1 : [5, 11]
-
-          Layer 2:
-
-          x6.lb = - x0 - x1 : [-11, -5]
-          x6.ub = - x0 - x1 : [-11, -5]
-        */
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -19, Tightening::LB ),
-            Tightening( 2, -3, Tightening::UB ),
-            Tightening( 3, 5, Tightening::LB ),
-            Tightening( 3, 11, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ),
-            Tightening( 4, 0, Tightening::UB ),
-            Tightening( 5, 5, Tightening::LB ),
-            Tightening( 5, 11, Tightening::UB ),
-
-            Tightening( 6, -11, Tightening::LB ),
-            Tightening( 6, -5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_sbt_relus_active_and_not_fixed()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTRelu( nlr, tableau );
-
-        tableau.setLowerBound( 0, 4 );
-        tableau.setUpperBound( 0, 6 );
-        tableau.setLowerBound( 1, 1 );
-        tableau.setUpperBound( 1, 5 );
-
-        // Strong negative bias for x2, which is node (1,0)
-        nlr.setBias( 1, 0, -15 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [4, 6]
-          x1: [1, 5]
-
-          Layer 1:
-
-          x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
-          x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
-
-          x3.lb = x0 + x1 : [5, 11]
-          x3.ub = x0 + x1 : [5, 11]
-
-          First ReLU is undecided, bound is concretized.
-          Coefficient: 12/(12--4) = 12/16 = 0.75
-          Second ReLU is active, bounds survive the activation
-
-          x4 range: [0, 12]
-          x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25
-          x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25
-
-          x5.lb = x0 + x1 : [5, 11]
-          x5.ub = x0 + x1 : [5, 11]
-
-          Layer 2:
-
-          x6.lb = 0.5x0 + 1.25x1 - 11.25
-          x6.ub = 0.5x0 + 1.25x1 - 8.25
-
-          x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1]
-        */
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -4, Tightening::LB ),
-            Tightening( 2, 12, Tightening::UB ),
-            Tightening( 3, 5, Tightening::LB ),
-            Tightening( 3, 11, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ),
-            Tightening( 4, 12, Tightening::UB ),
-            Tightening( 5, 5, Tightening::LB ),
-            Tightening( 5, 11, Tightening::UB ),
-
-            Tightening( 6, -8, Tightening::LB ),
-            Tightening( 6, 1, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_sbt_relus_active_and_externally_fixed()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTRelu( nlr, tableau );
-
-        tableau.setLowerBound( 0, 4 );
-        tableau.setUpperBound( 0, 6 );
-        tableau.setLowerBound( 1, 1 );
-        tableau.setUpperBound( 1, 5 );
-
-        // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
-        nlr.setBias( 1, 0, -15 );
-
-        // However, one of the ReLU's variables has been eliminated
-        nlr.eliminateVariable( 2, -3 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [4, 6]
-          x1: [1, 5]
-
-          Layer 1:
-
-          x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
-          x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
-
-          x3.lb = x0 + x1 : [5, 11]
-          x3.ub = x0 + x1 : [5, 11]
-
-          First ReLU is inactive (set externally), bounds get zeroed
-          Second ReLU is active, bounds survive the activation
-
-          x4.lb = 0
-          x4.ub = 0
-
-          x5.lb = x0 + x1 : [5, 11]
-          x5.ub = x0 + x1 : [5, 11]
-
-          Layer 2:
-
-          x6.lb = - x0 - x1 : [-11, -5]
-          x6.ub = - x0 - x1 : [-11, -5]
-        */
-
-        List<Tightening> expectedBounds( {
-            // x2 does not appear, because it has been eliminated
-
-            Tightening( 3, 5, Tightening::LB ),
-            Tightening( 3, 11, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ),
-            Tightening( 4, 0, Tightening::UB ),
-            Tightening( 5, 5, Tightening::LB ),
-            Tightening( 5, 11, Tightening::UB ),
-
-            Tightening( 6, -11, Tightening::LB ),
-            Tightening( 6, -5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_sbt_relu_residual1()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTReluResidual1( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [-1, 1]
-
-          Layer 1:
-
-          x1.lb = x0 : [-1, 1]
-          x1.ub = x0 : [-1, 1]
-
-          ReLU is undecided, bound is concretized.
-          Coefficient: 1/( 1--1 ) = 1/2 = 0.5
-
-          x2.lb = 0.5x0
-          x2.ub = 0.5x0 + 0.5
-          x2 range: [0, 1]
-
-          Layer 2 (with residual from x0):
-
-          x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1]
-          x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5]
-          x3 range: [-1, 2.5]
-
-          ReLU is undecided, bound is concretized.
-          Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7.
-
-          x4.lb = 0
-          x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5]
-          x4 range: [0, 2.5]
-
-          Layer 3 (with residual from x1):
-
-          x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4]
-          x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5]
-
-          x5 range: [-2, 4]
-        */
-
-        List<Tightening> expectedBounds( {
-            Tightening( 1, -1, Tightening::LB ),
-            Tightening( 1, 1, Tightening::UB ),
-            Tightening( 2, 0, Tightening::LB ),
-            Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, -1, Tightening::LB ),
-            Tightening( 3, 2.5, Tightening::UB ),
-            Tightening( 4, 0, Tightening::LB ),
-            Tightening( 4, 2.5, Tightening::UB ),
-            Tightening( 5, 2, Tightening::LB ),
-            Tightening( 5, 5.5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_sbt_relu_residual2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTReluResidual2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [-1, 1]
-
-          Layer 1:
-
-          x1.lb = x0 : [-1, 1]
-          x1.ub = x0 : [-1, 1]
-
-          ReLU is undecided, bound is concretized.
-          Coefficient: 1/( 1--1 ) = 1/2 = 0.5
-
-          x2.lb = 0.5x0
-          x2.ub = 0.5x0 + 0.5
-          x2 range: [0, 1]
-
-          Layer 2 (with residual from x0):
-
-          x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1]
-          x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5]
-          x3 range: [-1, 2.5]
-
-          ReLU is undecided, bound is concretized.
-          Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7.
-
-          x4.lb = 0
-          x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5]
-          x4 range: [0, 2.5]
-
-          Layer 3 (with residual from x0):
-
-          x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2]
-          x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5]
-          x5 range: [0, 7.5]
-
-          Layer 4:
-          x6.lb = 1x0 + 1 : [0, 2]
-          x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5]
-          x6 range: [0, 7.5]
-        */
-
-        List<Tightening> expectedBounds( {
-            Tightening( 1, -1, Tightening::LB ),
-            Tightening( 1, 1, Tightening::UB ),
-            Tightening( 2, 0, Tightening::LB ),
-            Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, -1, Tightening::LB ),
-            Tightening( 3, 2.5, Tightening::UB ),
-            Tightening( 4, 0, Tightening::LB ),
-            Tightening( 4, 2.5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ),
-            Tightening( 5, 7.5, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ),
-            Tightening( 6, 7.5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_sbt_relu_reindex()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTReluReindex( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [-1, 1]
-          x1: [-1, 1]
-
-          Layer 1:
-
-          x2.lb = x0 + x1 : [-2, 2]
-          x2.ub = x0 + x1 : [-2, 2]
-
-          x3.lb = x0 - x1 : [-2, 2]
-          x3.ub = x0 - x1 : [-2, 2]
-
-          Both ReLUs are undecided, bounds are concretized.
-          Coefficient: 2/( 2--2 ) = 2/4 = 0.5
-
-          x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1
-          x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1
-          x4 range: [0, 2]
-
-          x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1
-          x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1
-          x5 range: [0, 2]
-
-          Layer 2:
-
-          x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1]
-          x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3]
-          x6 range: [-1, 3]
-
-          x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0]
-          x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2]
-          x7 range: [-2, 2]
-
-          Both ReLUs are undecided, bounds are concretized.
-          Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5
-          Coefficient (first ReLU, upper): 1 (propagated as is)
-          Coefficient (second ReLU, lower): 0 (bound is zero)
-          Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5
-
-          x8.lb = 0.5 ( x0 ) = 0.5x0
-          x8.ub = x0 + 2
-          x8 range: [0, 3]
-
-          x9.lb = 0
-          x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5
-          x9 range: [0, 2]
-
-          Layer 3:
-
-          x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5]
-          x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6]
-          x10 range: [0.5, 6]
-
-          x11.lb = 0.5x1 - 0.5
-          x11.ub = 0.5x1 + 1.5
-          x11 range: [0, 2]
-
-        */
-
-        List<Tightening> expectedBounds(
-            { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-              Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-              Tightening( 4, 0, Tightening::LB ),  Tightening( 4, 2, Tightening::UB ),
-              Tightening( 5, 0, Tightening::LB ),  Tightening( 5, 2, Tightening::UB ),
-
-              Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
-              Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-              Tightening( 8, 0, Tightening::LB ),  Tightening( 8, 3, Tightening::UB ),
-              Tightening( 9, 0, Tightening::LB ),  Tightening( 9, 2, Tightening::UB ),
-
-              Tightening( 10, 0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-              Tightening( 11, 0, Tightening::LB ),   Tightening( 11, 2, Tightening::UB )
-
-            } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_sbt_abs_all_positive()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 7 );
-        nlr.setTableau( &tableau );
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 3; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Weights
-        nlr.setWeight( 0, 0, 1, 0, 2 );
-        nlr.setWeight( 0, 0, 1, 1, 1 );
-        nlr.setWeight( 0, 1, 1, 0, 3 );
-        nlr.setWeight( 0, 1, 1, 1, 1 );
-        nlr.setWeight( 2, 0, 3, 0, 1 );
-        nlr.setWeight( 2, 1, 3, 0, -1 );
-
-        // Mark the Abs sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
-
-        // Very loose bounds for neurons except inputs
-        double large = 1000000;
-
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-
-        tableau.setLowerBound( 0, 4 );
-        tableau.setUpperBound( 0, 6 );
-        tableau.setLowerBound( 1, 1 );
-        tableau.setUpperBound( 1, 5 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [4, 6]
-          x1: [1, 5]
-
-          Layer 1:
-
-          x2.lb = 2x0 + 3x1 : [11, 27]
-          x2.ub = 2x0 + 3x1 : [11, 27]
-
-          x3.lb = x0 + x1 : [5, 11]
-          x3.ub = x0 + x1 : [5, 11]
-
-          Both absolute values positive, bounds survive through activations:
-
-          x4.lb = 2x0 + 3x1 : [11, 27]
-          x4.ub = 2x0 + 3x1 : [11, 27]
-
-          x5.lb = x0 + x1 : [5, 11]
-          x5.ub = x0 + x1 : [5, 11]
-
-          Layer 2:
-
-          x6.lb = x0 + 2x1 : [6, 16]
-          x6.ub = x0 + 2x1 : [6, 16]
-        */
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 11, Tightening::LB ),
-            Tightening( 2, 27, Tightening::UB ),
-            Tightening( 3, 5, Tightening::LB ),
-            Tightening( 3, 11, Tightening::UB ),
-
-            Tightening( 4, 11, Tightening::LB ),
-            Tightening( 4, 27, Tightening::UB ),
-            Tightening( 5, 5, Tightening::LB ),
-            Tightening( 5, 11, Tightening::UB ),
-
-            Tightening( 6, 6, Tightening::LB ),
-            Tightening( 6, 16, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_sbt_abs_positive_and_negative()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 7 );
-        nlr.setTableau( &tableau );
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 3; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Weights
-        nlr.setWeight( 0, 0, 1, 0, 2 );
-        nlr.setWeight( 0, 0, 1, 1, 1 );
-        nlr.setWeight( 0, 1, 1, 0, 3 );
-        nlr.setWeight( 0, 1, 1, 1, 1 );
-        nlr.setWeight( 2, 0, 3, 0, 1 );
-        nlr.setWeight( 2, 1, 3, 0, -1 );
-
-        // Mark the Abs sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
-
-        // Very loose bounds for neurons except inputs
-        double large = 1000000;
-
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-
-        tableau.setLowerBound( 0, 4 );
-        tableau.setUpperBound( 0, 6 );
-        tableau.setLowerBound( 1, 1 );
-        tableau.setUpperBound( 1, 5 );
-
-        // Strong negative bias for x2, which is node (1,0)
-        nlr.setBias( 1, 0, -30 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [4, 6]
-          x1: [1, 5]
-
-          Layer 1:
-
-          x2.lb = 2x0 + 3x1 - 30 : [-19, -3]
-          x2.ub = 2x0 + 3x1 - 30 : [-19, -3]
-
-          x3.lb = x0 + x1 : [5, 11]
-          x3.ub = x0 + x1 : [5, 11]
-
-          First absolute value is negative, bounds get flipped
-          Second absolute value is positive, bounds survive the activation
-
-          x4.lb = -2x0 -3x1 + 30 : [3, 19]
-          x4.ub = -2x0 -3x1 + 30 : [3, 19]
-
-          x5.lb = x0 + x1 : [5, 11]
-          x5.ub = x0 + x1 : [5, 11]
-
-          Layer 2:
-
-          x6.lb = - 3x0 - 4x1 + 30 : [-8, 14]
-          x6.ub = - 3x0 - 4x1 + 30 : [-8, 14]
-        */
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -19, Tightening::LB ),
-            Tightening( 2, -3, Tightening::UB ),
-            Tightening( 3, 5, Tightening::LB ),
-            Tightening( 3, 11, Tightening::UB ),
-
-            Tightening( 4, 3, Tightening::LB ),
-            Tightening( 4, 19, Tightening::UB ),
-            Tightening( 5, 5, Tightening::LB ),
-            Tightening( 5, 11, Tightening::UB ),
-
-            Tightening( 6, -8, Tightening::LB ),
-            Tightening( 6, 14, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_sbt_absolute_values_positive_and_not_fixed()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 7 );
-        nlr.setTableau( &tableau );
-
-        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
-
-        // Mark layer dependencies
-        for ( unsigned i = 1; i <= 3; ++i )
-            nlr.addLayerDependency( i - 1, i );
-
-        // Weights
-        nlr.setWeight( 0, 0, 1, 0, 2 );
-        nlr.setWeight( 0, 0, 1, 1, 1 );
-        nlr.setWeight( 0, 1, 1, 0, 3 );
-        nlr.setWeight( 0, 1, 1, 1, 1 );
-        nlr.setWeight( 2, 0, 3, 0, 1 );
-        nlr.setWeight( 2, 1, 3, 0, -1 );
-
-        // Mark the Abs sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
-
-        // Very loose bounds for neurons except inputs
-        double large = 1000000;
-
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-
-        tableau.setLowerBound( 0, 4 );
-        tableau.setUpperBound( 0, 6 );
-        tableau.setLowerBound( 1, 1 );
-        tableau.setUpperBound( 1, 5 );
-
-        // Strong negative bias for x2, which is node (1,0)
-        nlr.setBias( 1, 0, -15 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [4, 6]
-          x1: [1, 5]
-
-          Layer 1:
-
-          x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
-          x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
-
-          x3.lb = x0 + x1 : [5, 11]
-          x3.ub = x0 + x1 : [5, 11]
-
-          First absolute value is undecided, bounds are concretized.
- Second absolute value is active, bounds survive the activation - - x4 range: [0, 12] - x4.lb = 0 - x4.ub = 12 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 + 12 : [ 1, 7] - - x6 range: [-11, 7] - */ - - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, 7, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_absolute_values_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
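The three Abs cases these tests walk through (source positive, source negative, source undecided) follow one small rule. A minimal standalone sketch, illustrative only and not Marabou's NLR API:

    #include <algorithm>
    #include <cstdio>

    struct Interval
    {
        double lb, ub;
    };

    // Interval propagation through y = |x|
    Interval absBounds( Interval x )
    {
        if ( x.lb >= 0 )
            return x; // Positive phase: bounds survive
        if ( x.ub <= 0 )
            return { -x.ub, -x.lb }; // Negative phase: bounds flip
        return { 0, std::max( -x.lb, x.ub ) }; // Undecided: concretize
    }

    int main()
    {
        Interval positive = absBounds( { 5, 11 } );   // [5, 11], as for x5
        Interval negative = absBounds( { -19, -3 } ); // [3, 19], as for x4 earlier
        Interval undecided = absBounds( { -4, 12 } ); // [0, 12], as for x4 above
        std::printf( "[%g,%g] [%g,%g] [%g,%g]\n", positive.lb, positive.ub,
                     negative.lb, negative.ub, undecided.lb, undecided.ub );
        return 0;
    }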
- nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Second absolute value is positive, bounds survive the activation - - x4: all set to 3 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 + 3 : [-8, -2] - x6.ub = - x0 - x1 + 3 : [-8, -2] - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_signs_positive_and_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] -
- First sign is undecided, bounds are concretized. - Second sign is active, bounds become constant 1 - Coefficient (first Sign, lower): 2/12 = 1/6. - Coefficient (first Sign, upper): -2/-4 = 1/2. - - x4 range: [-1, 1] - x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 - x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 - - x5 range: [1, 1] - x5.lb = 1 - x5.ub = 1 - - Layer 2: - - x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 2/6 x0 + 3/6 x1 - 27/6 : [-16/6, 0] - x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] - - x6 range: [-8/3, 6] - */ - - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), - Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), - Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2.6667, Tightening::LB ), - Tightening( 6, 6, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_signs_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
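The Sign coefficients quoted above (1/6 for the lower bound, 1/2 for the upper) are the slopes of the two relaxation lines through (0, -1), (u, 1) and through (l, -1), (0, 1) respectively. A short sketch under that reading, not Marabou code:

    #include <cstdio>

    int main()
    {
        // Undecided Sign over [l, u] with l < 0 < u; here x2's range from the test
        double l = -4, u = 12;
        double lowerCoef = 2 / u;  // 1/6: lb(x) = (2/u) x - 1
        double upperCoef = -2 / l; // 1/2: ub(x) = (-2/l) x + 1
        std::printf( "lb(x) = %gx - 1, ub(x) = %gx + 1\n", lowerCoef, upperCoef );
        return 0;
    }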
- nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First sign is negative, bounds become constant -1 - Second sign is positive, bounds become constant 1 - - x4: all set to -1 - - x5: all set to 1 - - Layer 2: - - x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 - x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), - Tightening( 4, -1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), - Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - x1: [-1, 1] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 2] - x2.ub = x0 + x1 : [-2, 2] - - x3.lb = x0 - x1 : [-2, 2] - x3.ub = x0 - x1 : [-2, 2] - - Both LeakyReLUs are undecided, bounds are concretized. - Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 - Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 - - x4.lb = x0 + x1 - x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 - x4 range: [-0.4, 2] - - x5.lb = x0 - x1 - x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 - x5 range: [-0.4, 2] - - Layer 2: - - x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] - x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] - x6 range: [-2, 2.8] - - x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] - x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] - x7 range: [-2.8, 2.8] - - Both LeakyReLUs are undecided, bounds are concretized. 
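The LeakyReLU coefficient and bias used in these comments follow the secant formulas coef = ( u - alpha*l )/( u - l ) and bias = ( alpha - 1 )*u*l/( u - l ); a tiny illustrative sketch, with the values of the first pair below:

    #include <cstdio>

    int main()
    {
        double alpha = 0.2, l = -2, u = 2;               // undecided: l < 0 < u
        double coef = ( u - alpha * l ) / ( u - l );     // ( 2 - 0.2*-2 )/4 = 0.6
        double bias = ( alpha - 1 ) * u * l / ( u - l ); // ( 0.2 - 1 )*2*-2/4 = 0.8
        std::printf( "ub(x) = %gx + %g, lb(x) = x\n", coef, bias );
        return 0;
    }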
- Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 - Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 - - Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 - Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 - - x8.lb = 2x0 - x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 - x8 range: [-0.4, 2.8] - - x9.lb = 0.4x0 + 1.6x1 - 0.8 - x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 - x9 range: [-0.56, 2.8] - - Layer 3: - - x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 4.2] - x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : - [3.08, 6.12] x10 range: [-3.8, 6.12] - - x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] - x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] - x11 range: [-2.8, 2.8] - - */ - - List expectedBounds( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), - Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), - - Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), - Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), - - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ), - Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_sigmoids_and_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSigmoidsAndRound( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - // Layer 1 - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); - - // Layer 2 - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); - - // Layer 3 - /* - Double-check with Python - --- - from math import exp as e - def g(x): - return 1 / (1 + e(-x)) - - def g_prime(x): - return g(x) * (1 - g(x)) - - def lam(l, u): - return (g(u) - g(l)) / (u - l) - - def lam_prime(l, u): - return min(g_prime(l), g_prime(u)) - - l3 = l4 = -2 - u3 = u4 = 2 - l5 = l6 = g(-2)
- u5 = u6 = g(2) - lambda7 = lam(l3, u3) - lambda7_prime = lam_prime(l3, u3) - lambda8 = lam(l4, u4) - lambda8_prime = lam_prime(l4, u4) - x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) - x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) - x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) - x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) - print(x7_l) - print(x7_u) - print(x8_l) - print(x8_u) - --- - [output]: - 0.4483930148512481 - 1.5516069851487517 - -0.5516069851487517 - 0.5516069851487517 - */ - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); - - // Layer 4 - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); - } - - void test_sbt_max_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTMax( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - x1: [-1, 2] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 3] - x2.ub = x0 + x1 : [-2, 3] - - x3.lb = x0 - x1 : [-3, 2] - x3.ub = x0 - x1 : [-3, 2] - - Both ReLUs are undecided, bounds are concretized. - Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6 - Coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 - - x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 - x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 - x4 range: [0, 3] - - x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 - 0.4x1 - x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 - 0.4x1 + 1.2 - x5 range: [0, 2] - - Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub - Max inherits lower bound from x4, and its upper bound is constant 3.
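Both rules used in this comment, the undecided-ReLU coefficient u/( u - l ) and the constant upper bound of an unfixed Max, fit in one standalone sketch (illustrative only, not Marabou code):

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        // Undecided ReLUs over [-2, 3] and [-3, 2]
        double l4 = -2, u4 = 3, l5 = -3, u5 = 2;
        double lam4 = u4 / ( u4 - l4 ); // 3/5 = 0.6
        double lam5 = u5 / ( u5 - l5 ); // 2/5 = 0.4
        // After the ReLUs the sources range over [0, 3] and [0, 2]; the ranges
        // overlap, so the Max is not fixed and its upper bound is the constant
        // max of the source upper bounds.
        double maxUb = std::max( u4, u5 ); // 3
        std::printf( "lambda4 = %g, lambda5 = %g, max ub = %g\n", lam4, lam5, maxUb );
        return 0;
    }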
- - x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8] - x6.ub = 3 : [3, 3] - x6 range: [0, 3] - - Layer 3: - - x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.2x1 : [-2.4, 3.6] - x7.ub = 2 ( 3 ) = 6 : [6, 6] - x7 range: [-2.4, 6] - */ - - List expectedBounds( { - Tightening( 2, -2, Tightening::LB ), - Tightening( 2, 3, Tightening::UB ), - Tightening( 3, -3, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), - Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -2.4, Tightening::LB ), - Tightening( 7, 6, Tightening::UB ), - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_max_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTMax( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -3 ); - tableau.setUpperBound( 1, -2 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 2] - x1: [-3, -2] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 0] - x2.ub = x0 + x1 : [-2, 0] - - x3.lb = x0 - x1 : [3, 5] - x3.ub = x0 - x1 : [3, 5] - - First ReLU is negative, bounds become constant 0 - Second ReLU is positive, bounds survive the activation - - x4: all set to 0 - - x5.lb = x0 - x1 : [3, 5] - x5.ub = x0 - x1 : [3, 5] - - Max is fixed because x5.lb > x4.ub, it inherits x5's bounds - - x6.lb = x0 - x1 : [3, 5] - x6.ub = x0 - x1 : [3, 5] - - Layer 3: - - x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] - x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] - */ - - List expectedBounds( { - Tightening( 2, -2, Tightening::LB ), - Tightening( 2, 0, Tightening::UB ), - Tightening( 3, 3, Tightening::LB ), - Tightening( 3, 5, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 3, Tightening::LB ), - Tightening( 5, 5, Tightening::UB ), - Tightening( 6, 3, Tightening::LB ), - Tightening( 6, 5, Tightening::UB ), - Tightening( 7, 6, Tightening::LB ), - Tightening( 7, 10, Tightening::UB ), - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_softmax1() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - } - - void test_sbt_softmax2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - { - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); -
tableau.setUpperBound( 0, 1.000001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.000001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.000001 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 1.0001] - x1: [1, 1.0001] - x2: [1, 1.0001] - */ - List expectedBounds( { Tightening( 3, 2, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0, Tightening::UB ), - Tightening( 6, 0.2595, Tightening::LB ), - Tightening( 6, 0.2595, Tightening::UB ), - Tightening( 7, 0.7054, Tightening::LB ), - Tightening( 7, 0.7054, Tightening::UB ), - Tightening( 8, 0.0351, Tightening::LB ), - Tightening( 8, 0.0351, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), - Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), - Tightening( 10, -1, Tightening::UB ) - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - { - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.000001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.000001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.000001 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 1.0001] - x1: [1, 1.0001] - x2: [1, 1.0001] - */ - List expectedBounds( { Tightening( 3, 2, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0, Tightening::UB ), - Tightening( 6, 0.2595, Tightening::LB ), - Tightening( 6, 0.2595, Tightening::UB ), - Tightening( 7, 0.7054, Tightening::LB ), - Tightening( 7, 0.7054, Tightening::UB ), - Tightening( 8, 0.0351, Tightening::LB ), - Tightening( 8, 0.0351, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), - Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), - Tightening( 10, -1, Tightening::UB ) - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - } - - void test_sbt_softmax3() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax2( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.00001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.00001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.00001 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 1.0001] - x1: [1, 1.0001] - x2: [1, 1.0001] - */ - - List expectedBounds( - { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - 
Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), - Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), - Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), - Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), - Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), - Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), - Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ), - Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ), - Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ), - Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB ) - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_softmax_bounds_er() - { - Vector inputLb = { -1, 0, 1 }; - Vector inputUb = { 0, 2, 4 }; - Vector input = { -0.5, 1, 2.5 }; - - double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); - - - Vector outputLb = { 0.2, 0, 0 }; - Vector outputUb = { 0.4, 0.1, 0.1 }; - - value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); - } - - void test_softmax_bounds_lse1() - { - Vector inputLb = { -1, 0, 1 }; - Vector inputUb = { 0, 2, 3 }; - Vector input = { -0.5, 1, 2 }; - double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); - - Vector outputLb = { 0.2, 0, 0 }; - Vector outputUb = { 0.4, 0.1, 0.1 }; - value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); - } - - void test_sbt_bilinear() - { - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); 
- populateNetworkSBTBilinear( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -2 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 2] - x1: [-2, 1] - - Layer 1: - - x2.lb = x0 - 2x1 : [-1, 6] - x2.ub = x0 - 2x1 : [-1, 6] - - x3.lb = x0 + x1 : [-1, 3] - x3.ub = x0 + x1 : [-1, 3] - - Coefficients for bilinear layer: - Lower bound: - alpha_l = x3.lb = -1 - beta = x2.lb = -1 - gamma_l = -x2.lb * x3.lb = --1 * -1 = -1 - - Upper bound: - alpha_u = x3.ub = 3 - beta = x2.lb = -1 - gamma_u = -x2.lb * x3.ub = --1 * 3 = 3 - - x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] - x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [-2, 21] - x4 range: [-7, 21] - - Layer 3: - - x5.lb = -1 ( 2x0 - 7x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 2] - x5.ub = -1 ( -2x0 + x1 - 1 ) = 2x0 - x1 + 1 : [2, 7] - x5 range: [-21, 7] - */ - - List expectedBounds( { Tightening( 2, -1, Tightening::LB ), - Tightening( 2, 6, Tightening::UB ), - Tightening( 3, -1, Tightening::LB ), - Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -7, Tightening::LB ), - Tightening( 4, 21, Tightening::UB ), - Tightening( 5, -21, Tightening::LB ), - Tightening( 5, 7, Tightening::UB ) } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_concretize_input_assignment() - { - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - - populateNetwork( nlr ); - - // With ReLUs, inputs are zeros, only biases count - tableau.nextValues[0] = 0; - tableau.nextValues[1] = 0; - - Map assignment; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); - - TS_ASSERT( assignment.size() == 14 ); - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); - - // With ReLUs, case 1 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); - - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) ); - - // With ReLUs, case 2 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); - - TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); - } - - - void test_obtain_bound_from_ipq() - { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - Query query; - query.setNumberOfVariables( 14 ); - - - // Initialize the bounds - query.setLowerBound( 0, -1 ); - query.setUpperBound( 0, 1 ); - query.setLowerBound( 1, -1 ); - query.setUpperBound( 1, 1 ); - - double large = 1000; - query.setLowerBound( 2, -large ); - query.setUpperBound( 2, large ); -
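The alpha/beta/gamma terms in the bilinear comment above are McCormick-envelope coefficients for z = x2 * x3: z >= x3.lb*x2 + x2.lb*x3 - x2.lb*x3.lb and z <= x3.ub*x2 + x2.lb*x3 - x2.lb*x3.ub. A minimal sketch under that reading, not Marabou code:

    #include <cstdio>

    int main()
    {
        double x2lb = -1, x3lb = -1, x3ub = 3; // x2 in [-1, 6], x3 in [-1, 3]
        // Lower plane: alpha_l * x2 + beta * x3 + gamma_l
        std::printf( "lower: %g*x2 + %g*x3 + %g\n", x3lb, x2lb, -x2lb * x3lb );
        // Upper plane: alpha_u * x2 + beta * x3 + gamma_u
        std::printf( "upper: %g*x2 + %g*x3 + %g\n", x3ub, x2lb, -x2lb * x3ub );
        return 0;
    }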
query.setLowerBound( 3, -large ); - query.setUpperBound( 3, large ); - query.setLowerBound( 4, -large ); - query.setUpperBound( 4, large ); - query.setLowerBound( 5, -large ); - query.setUpperBound( 5, large ); - query.setLowerBound( 6, -large ); - query.setUpperBound( 6, large ); - query.setLowerBound( 7, -large ); - query.setUpperBound( 7, large ); - query.setLowerBound( 8, -large ); - query.setUpperBound( 8, large ); - query.setLowerBound( 9, -large ); - query.setUpperBound( 9, large ); - query.setLowerBound( 10, -large ); - query.setUpperBound( 10, large ); - query.setLowerBound( 11, -large ); - query.setUpperBound( 11, large ); - query.setLowerBound( 12, -large ); - query.setUpperBound( 12, large ); - query.setLowerBound( 13, -large ); - query.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); - } - - void test_backwards_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), - Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, 
Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 8, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), - - Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 8, 0, Tightening::LB ), - Tightening( 9, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_relu2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); 
- tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), - - Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), - Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), - - Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), - - Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), - - Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 19, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - 
tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), - Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), - Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), - - Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), - Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), - Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), - - Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 20, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_sigmoid() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), - Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), - - Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), - Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), - - Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), - Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), - - Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), - Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), - Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), - - Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), - Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), - - Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), - Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), - - Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), - Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, 
expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_sigmoid2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), - Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), - Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), - Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), - - Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), - Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), - Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), - - Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), - Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), - Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), - - Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), - Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), - - Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), - Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), - - Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, 
-large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), - Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), - Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), - - Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), - Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), - Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), - - Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), - Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), - Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), - - Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), - Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), - - Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), - Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), - - Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List 
expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_abs() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), - - Tightening( 6, -5, Tightening::LB ), Tightening( 
6, 4, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), - - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_abs2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), - - Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), - - Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( 
bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -4, Tightening::LB ), - Tightening( 11, 6, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( 
{ - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), - - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, -10, Tightening::LB ), - Tightening( 7, 5, Tightening::UB ), - - Tightening( 11, -14, Tightening::LB ), - Tightening( 11, 11, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_round2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), - Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), - Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), - - Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), - Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), - - Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), - Tightening( 20, -17, Tightening::LB ), 
Tightening( 20, 17, Tightening::UB ), - - Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), - Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), - Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, -9, 
Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), - Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), - - Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_sign() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, 
large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_sign2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - 
Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB 
), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_leaky_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 4, -0.1, Tightening::LB ), - - Tightening( 8, -0.045, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - 
tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), - - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), - - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 4, -0.5, Tightening::LB ), - Tightening( 5, -0.4, Tightening::LB ), - - Tightening( 8, -0.4571, Tightening::LB ), - Tightening( 9, -1.1057, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_leaky_relu2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyRelu2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 
3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), - Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), - Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), - Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), - Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), - - Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), - Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), - - Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), - Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), - - Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.3, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - Tightening( 10, -0.6, Tightening::LB ), - - Tightening( 14, -0.9, Tightening::LB ), - Tightening( 15, -0.89, Tightening::LB ), - Tightening( 16, -0.77, Tightening::LB ), - - Tightening( 19, -2.3133, Tightening::LB ), - Tightening( 20, -1.2, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - 
tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), - Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), - Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), - Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), - Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), - - Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), - Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), - - Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), - Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), - - Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.5, Tightening::LB ), - Tightening( 9, -0.6, Tightening::LB ), - Tightening( 10, -1.5, Tightening::LB ), - - Tightening( 14, -1.1, Tightening::LB ), - Tightening( 15, -2.1771, Tightening::LB ), - Tightening( 16, -1.15, Tightening::LB ), - - Tightening( 19, -5.6259, Tightening::LB ), - Tightening( 20, -1.9, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_softmax_and_max() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSoftmaxAndMax( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, 
Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), - Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), - Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), - - Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ), - Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ), - - Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, Tightening::UB ), - - Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), - - Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), - - Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - 
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_softmax_and_max2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), - Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), - Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), - Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), - - Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ), - Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ), - Tightening( 13, -2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), - - Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ), - Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ), - - Tightening( 17, 0.1086, Tightening::LB ), Tightening( 17, 0.9971, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 
10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), - - Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ), - Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), - Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), - - Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ), - Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), - - Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_relu_and_bilinear() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB 
-            Tightening( 7, 0, Tightening::LB ),  Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ),  Tightening( 8, 4, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
-
-            Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );

-        List<Tightening> expectedBounds2( {} );
+        for ( unsigned i = 0; i < simulationSize; ++i )
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                0 ) );
+    }

-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+    void test_simulate_relus_and_abs()
+    {
+        NLR::NetworkLevelReasoner nlr;
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 4, NLR::Layer::RELU, 2 );
+        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );

-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 5; ++i )
+            nlr.addLayerDependency( i - 1, i );

-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
+        // Set the weights and biases for the weighted sum layers
+        nlr.setWeight( 0, 0, 1, 0, 1 );
+        nlr.setWeight( 0, 0, 1, 1, 2 );
+        nlr.setWeight( 0, 1, 1, 1, -3 );
+        nlr.setWeight( 0, 1, 1, 2, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 0, 3, 1, -1 );
+        nlr.setWeight( 2, 1, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 1, 1 );
+        nlr.setWeight( 2, 2, 3, 0, -1 );
+        nlr.setWeight( 2, 2, 3, 1, -5 );

-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+        nlr.setWeight( 4, 0, 5, 0, 1 );
+        nlr.setWeight( 4, 0, 5, 1, 1 );
+        nlr.setWeight( 4, 1, 5, 1, 3 );

-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ),  Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ),  Tightening( 6, 2, Tightening::UB ),
+        nlr.setBias( 1, 0, 1 );
+        nlr.setBias( 3, 1, 2 );
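+
+        // With these weights and biases the network computes, for input ( x0, x1 )
+        // (writing n1_i / n3_i for the i'th neuron of layers 1 / 3):
+        //   layer 1: ( x0 + 1, 2 * x0 - 3 * x1, x1 )
+        //   layer 3: ( |n1_0| + |n1_1| - |n1_2|, -|n1_0| + |n1_1| - 5 * |n1_2| + 2 )
+        //   output : ( relu( n3_0 ), relu( n3_0 ) + 3 * relu( n3_1 ) )
+        // E.g. input ( 1, 1 ) yields ( 2, 2 ) and input ( 1, 2 ) yields ( 4, 4 ).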

-            Tightening( 3, 0, Tightening::LB ),  Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ),  Tightening( 5, 5, Tightening::UB ),
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+        // Mark the ReLU/Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+        nlr.addActivationSource( 1, 2, 2, 2 );

-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
-            Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ),
+        nlr.addActivationSource( 3, 0, 4, 0 );
+        nlr.addActivationSource( 3, 1, 4, 1 );

-            Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ),
+        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );

-            Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ),
-        } );
+        // Simulate1
+        Vector<Vector<double>> simulations1;
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+        simulations1.append( Vector<double>( simulationSize, 1 ) );

-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                2 ) );
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 1 )
+                    .get( i ),
+                2 ) );
+        }

-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+        // Simulate2
+        Vector<Vector<double>> simulations2;
+        simulations2.append( Vector<double>( simulationSize, 1 ) );
+        simulations2.append( Vector<double>( simulationSize, 2 ) );

-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-        } );
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );

-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                4 ) );
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 1 )
+                    .get( i ),
+                4 ) );
+        }
+        }
    }

-    void test_backwards_relu_and_bilinear2()
+    void test_concretize_input_assignment()
    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
        NLR::NetworkLevelReasoner nlr;
        MockTableau tableau;
        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear2( nlr, tableau );

-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
+        populateNetwork( nlr );

+        // With ReLUs: inputs are zero, so only the biases count
+        tableau.nextValues[0] = 0;
+        tableau.nextValues[1] = 0;

-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+        Map<unsigned, double> assignment;
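+
+        // concretizeInputAssignment() reads the input values from the tableau,
+        // evaluates the network on them, and records a concrete value for each
+        // of the 14 variables in the assignment map; per-layer results are
+        // exposed through getAssignment().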

-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+        TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) );

-            Tightening( 7, 0, Tightening::LB ),  Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ),  Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ),  Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
+        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) );
+        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) );

-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
+        TS_ASSERT( assignment.size() == 14 );
+        TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) );
+        TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) );

-            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ),
-            Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ),
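+        // The remaining cases drive nonzero inputs through the ReLUs: input
+        // ( 1, 1 ) should produce outputs ( 1, 1 ), and input ( 1, 2 ) should
+        // zero out both outputs.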
+        // With ReLUs, case 1
+        tableau.nextValues[0] = 1;
+        tableau.nextValues[1] = 1;

-            Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ),
-        } );
+        TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) );

-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) );
+        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) );
+        TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) );
+        TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) );

-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+        // With ReLUs, case 2
+        tableau.nextValues[0] = 1;
+        tableau.nextValues[1] = 2;

-        List<Tightening> expectedBounds2( {
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
+        TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) );

-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) );
+        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) );
+        TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) );
+        TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) );
+    }

-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );

-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
+    void test_obtain_bound_from_ipq()
+    {
+        NLR::NetworkLevelReasoner nlr;
+        populateNetwork( nlr );
+
+        Query query;
+        query.setNumberOfVariables( 14 );

-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ),  Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ),  Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ),  Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
+        // Initialize the bounds
+        query.setLowerBound( 0, -1 );
+        query.setUpperBound( 0, 1 );
+        query.setLowerBound( 1, -1 );
+        query.setUpperBound( 1, 1 );

-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ),  Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ),  Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+        double large = 1000;
+        query.setLowerBound( 2, -large );
+        query.setUpperBound( 2, large );
+        query.setLowerBound( 3, -large );
+        query.setUpperBound( 3, large );
+        query.setLowerBound( 4, -large );
+        query.setUpperBound( 4, large );
+        query.setLowerBound( 5, -large );
+        query.setUpperBound( 5, large );
+        query.setLowerBound( 6, -large );
+        query.setUpperBound( 6, large );
+        query.setLowerBound( 7, -large );
+        query.setUpperBound( 7, large );
+        query.setLowerBound( 8, -large );
+        query.setUpperBound( 8, large );
+        query.setLowerBound( 9, -large );
+        query.setUpperBound( 9, large );
+        query.setLowerBound( 10, -large );
+        query.setUpperBound( 10, large );
+        query.setLowerBound( 11, -large );
+        query.setUpperBound( 11, large );
+        query.setLowerBound( 12, -large );
+        query.setUpperBound( 12, large );
+        query.setLowerBound( 13, -large );
+        query.setUpperBound( 13, large );

-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
+        // Obtain the current bounds from the query
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) );

-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ),
-            Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ),
+        // Perform the tightening pass
+        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
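+
+        // Interval arithmetic propagates the input box [-1, 1] x [-1, 1]
+        // forward through the network; the resulting tightenings are collected
+        // and compared against the expected bounds below.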

-            Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ),
-        } );
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 0, Tightening::LB ),  Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ),  Tightening( 3, 2, Tightening::UB ),

-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+            Tightening( 5, 0, Tightening::LB ),  Tightening( 5, 5, Tightening::UB ),
+            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ),  Tightening( 7, 1, Tightening::UB ),

-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ),  Tightening( 9, 7, Tightening::UB ),

-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
+            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+            Tightening( 11, 0, Tightening::LB ),  Tightening( 11, 7, Tightening::UB ),

-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
+            Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
+            Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ),
        } );

+        List<Tightening> bounds;
        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expectedBounds )
-    {
-        if ( bounds.size() != expectedBounds.size() )
-            return false;
-
-        bool allFound = true;
-        for ( const auto &bound : bounds )
-        {
-            bool currentFound = false;
-            for ( const auto &expectedBound : expectedBounds )
-            {
-                currentFound |=
-                    ( bound._type == expectedBound._type &&
-                      bound._variable == expectedBound._variable &&
-                      FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) );
-            }
-            allFound &= currentFound;
-        }
-        return allFound;
-    }
-
-    void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
-    {
-        ASSERT( tableau );
-        for ( const auto &tightening : tightenings )
-        {
-            if ( tightening._type == Tightening::LB )
-            {
-                tableau.setLowerBound( tightening._variable, tightening._value );
-            }
-            if ( tightening._type == Tightening::UB )
-            {
-                tableau.setUpperBound( tightening._variable, tightening._value );
-            }
-        }
+        TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() );
+        for ( const auto &bound : expectedBounds )
+            TS_ASSERT( bounds.exists( bound ) );
    }

    void test_get_previous_bias()