From b568896825d45eb61a6fecca6d9942138f0eaa45 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Mon, 10 Jul 2017 15:30:33 +0530
Subject: [PATCH 01/33] - Refactoring of ct.bt - Refactoring of code to avoid
 duplicate methods. Removed TestUtil.scala - Added some exception handling in
 northwind.bt

---
 ...te_and_load_evictionOverflow_row_table.sql | 105 +++
 ...d_persistentEvictionOverflow_row_table.sql | 111 +++
 .../src/resources/scripts/ct/ct_queries.sql   | 106 +--
 .../snappydata/hydra/cluster/SnappyTest.java  |   1 +
 .../test/java/io/snappydata/hydra/ct/ct.bt    | 141 +---
 .../test/java/io/snappydata/hydra/ct/ctHA.bt  |  90 +++
 .../java/io/snappydata/hydra/ct/ctMisc.bt     | 169 +++++
 .../io/snappydata/hydra/ct/ctSplitMode.conf   |   4 +-
 .../hydra/ct/ctSplitModeCreateWithScript.conf |   4 +-
 .../ct/ctSplitModeCreateWithSparkApp.conf     |   4 +-
 .../io/snappydata/hydra/SnappyTestUtils.scala | 243 ++++--
 .../scala/io/snappydata/hydra/TestUtil.scala  | 299 --------
 .../hydra/concurrency/ConcTestUtils.scala     | 114 +--
 .../io/snappydata/hydra/ct/CTQueries.scala    |  94 ++-
 .../io/snappydata/hydra/ct/CTTestUtil.scala   | 172 +++--
 .../hydra/ct/ValidateCTQueriesApp.scala       |   3 +-
 .../hydra/ct/ValidateCTQueriesJob.scala       |   6 +-
 .../hydra/northwind/NWTestUtil.scala          | 713 ++++++++----------
 .../northwind/ValidateNWQueriesApp.scala      |  11 +-
 .../northwind/ValidateNWQueriesJob.scala      |   9 +-
 .../SnapshotIsolationTestUtils.scala          |  18 +-
 21 files changed, 1343 insertions(+), 1074 deletions(-)
 create mode 100644 dtests/src/resources/scripts/ct/ct_create_and_load_evictionOverflow_row_table.sql
 create mode 100644 dtests/src/resources/scripts/ct/ct_create_and_load_persistentEvictionOverflow_row_table.sql
 create mode 100644 dtests/src/test/java/io/snappydata/hydra/ct/ctHA.bt
 create mode 100644 dtests/src/test/java/io/snappydata/hydra/ct/ctMisc.bt
 delete mode 100644 dtests/src/test/scala/io/snappydata/hydra/TestUtil.scala

diff --git a/dtests/src/resources/scripts/ct/ct_create_and_load_evictionOverflow_row_table.sql b/dtests/src/resources/scripts/ct/ct_create_and_load_evictionOverflow_row_table.sql
new file mode 100644
index 0000000000..fdc1fec1d7
--- /dev/null
+++ b/dtests/src/resources/scripts/ct/ct_create_and_load_evictionOverflow_row_table.sql
@@ -0,0 +1,105 @@
+-- DROP TABLE IF ALREADY EXISTS --
+DROP TABLE IF EXISTS ORDERS_DETAILS;
+DROP TABLE IF EXISTS staging_orders_details;
+
+-- CREATE PARTITIONED ROW TABLE ORDERS_DETAILS WITH EVICTION OVERFLOW --
+CREATE EXTERNAL TABLE staging_orders_details USING com.databricks.spark.csv
+             OPTIONS (path ':dataLocation/ORDERS_DETAILS.dat', header 'true', inferSchema 'false',
+              nullValue 'NULL');
+
+CREATE TABLE ORDERS_DETAILS
+             (SINGLE_ORDER_DID BIGINT ,SYS_ORDER_ID VARCHAR(64) ,SYS_ORDER_VER INTEGER ,DATA_SNDG_SYS_NM VARCHAR(128) ,
+             SRC_SYS VARCHAR(20) ,SYS_PARENT_ORDER_ID VARCHAR(64) ,SYS_PARENT_ORDER_VER SMALLINT ,PARENT_ORDER_TRD_DATE VARCHAR(20),
+             PARENT_ORDER_SYS_NM VARCHAR(128) ,SYS_ALT_ORDER_ID VARCHAR(64) ,TRD_DATE VARCHAR(20),GIVE_UP_BROKER VARCHAR(20) ,
+             EVENT_RCV_TS TIMESTAMP ,SYS_ROOT_ORDER_ID VARCHAR(64) ,GLB_ROOT_ORDER_ID VARCHAR(64) ,GLB_ROOT_ORDER_SYS_NM VARCHAR(128) ,
+             GLB_ROOT_ORDER_RCV_TS TIMESTAMP ,SYS_ORDER_STAT_CD VARCHAR(20) ,SYS_ORDER_STAT_DESC_TXT VARCHAR(120) ,DW_STAT_CD VARCHAR(20) ,
+             EVENT_TS TIMESTAMP,ORDER_OWNER_FIRM_ID VARCHAR(20),RCVD_ORDER_ID VARCHAR(64) ,EVENT_INITIATOR_ID VARCHAR(64),
+             TRDR_SYS_LOGON_ID VARCHAR(64),SOLICITED_FG  VARCHAR(1),RCVD_FROM_FIRMID_CD VARCHAR(20),RCV_DESK VARCHAR(20),
+             SYS_ACCT_ID_SRC VARCHAR(64) ,CUST_ACCT_MNEMONIC VARCHAR(128),CUST_SLANG VARCHAR(20) ,SYS_ACCT_TYPE VARCHAR(20) ,
+             CUST_EXCH_ACCT_ID VARCHAR(64) ,SYS_SECURITY_ALT_ID VARCHAR(64) ,TICKER_SYMBOL VARCHAR(32) ,TICKER_SYMBOL_SUFFIX VARCHAR(20) ,
+             PRODUCT_CAT_CD VARCHAR(20) ,SIDE VARCHAR(20) ,LIMIT_PRICE DECIMAL(38, 18),STOP_PRICE DECIMAL(38, 18),ORDER_QTY DECIMAL(18, 4) ,
+             TOTAL_EXECUTED_QTY DECIMAL(18, 4) ,AVG_PRICE DECIMAL(38, 18) ,DAY_EXECUTED_QTY DECIMAL(18, 4) ,DAY_AVG_PRICE DECIMAL(38, 18) ,
+             REMNG_QTY DECIMAL(18, 4) ,CNCL_QTY DECIMAL(18, 4) ,CNCL_BY_FG  VARCHAR(1) ,EXPIRE_TS TIMESTAMP ,EXEC_INSTR VARCHAR(64) ,
+             TIME_IN_FORCE VARCHAR(20) ,RULE80AF  VARCHAR(1) ,DEST_FIRMID_CD VARCHAR(20) ,SENT_TO_CONDUIT VARCHAR(20) ,SENT_TO_MPID VARCHAR(20) ,
+             RCV_METHOD_CD VARCHAR(20) ,LIMIT_ORDER_DISP_IND  VARCHAR(1) ,MERGED_ORDER_FG  VARCHAR(1) ,MERGED_TO_ORDER_ID VARCHAR(64) ,
+             RCV_DEPT_ID VARCHAR(20) ,ROUTE_METHOD_CD VARCHAR(20) ,LOCATE_ID VARCHAR(256) ,LOCATE_TS TIMESTAMP ,LOCATE_OVERRIDE_REASON VARCHAR(2000) ,
+             LOCATE_BROKER VARCHAR(256) ,ORDER_BRCH_SEQ_TXT VARCHAR(20) ,IGNORE_CD VARCHAR(20) ,CLIENT_ORDER_REFID VARCHAR(64) ,
+             CLIENT_ORDER_ORIG_REFID VARCHAR(64) ,ORDER_TYPE_CD VARCHAR(20) ,SENT_TO_ORDER_ID VARCHAR(64) ,ASK_PRICE DECIMAL(38, 18) ,
+             ASK_QTY DECIMAL(18, 4) ,BID_PRICE DECIMAL(38, 18) ,BID_QTY DECIMAL(18, 4) ,REG_NMS_EXCEP_CD VARCHAR(20) ,REG_NMS_EXCEP_TXT VARCHAR(2000) ,
+             REG_NMS_LINK_ID VARCHAR(64) ,REG_NMS_PRINTS  VARCHAR(1) ,REG_NMS_STOP_TIME TIMESTAMP ,SENT_TS TIMESTAMP ,RULE92  VARCHAR(1) ,
+             RULE92_OVERRIDE_TXT VARCHAR(2000) ,RULE92_RATIO DECIMAL(25, 10) ,EXMPT_STGY_BEGIN_TIME TIMESTAMP ,EXMPT_STGY_END_TIME TIMESTAMP ,
+             EXMPT_STGY_PRICE_INST VARCHAR(2000) ,EXMPT_STGY_QTY DECIMAL(18, 4) ,CAPACITY VARCHAR(20) ,DISCRETION_QTY DECIMAL(18, 4) ,
+             DISCRETION_PRICE VARCHAR(64) ,BRCHID_CD VARCHAR(20) ,BASKET_ORDER_ID VARCHAR(64) ,PT_STRTGY_CD VARCHAR(20) ,
+             SETL_DATE VARCHAR(20),SETL_TYPE VARCHAR(20) ,SETL_CURR_CD VARCHAR(20) ,SETL_INSTRS VARCHAR(2000) ,COMMENT_TXT VARCHAR(2000) ,
+             CHANNEL_NM VARCHAR(128) ,FLOW_CAT VARCHAR(20) ,FLOW_CLASS VARCHAR(20) ,FLOW_TGT VARCHAR(20) ,ORDER_FLOW_ENTRY VARCHAR(20) ,
+             ORDER_FLOW_CHANNEL VARCHAR(20) ,ORDER_FLOW_DESK VARCHAR(20) ,FLOW_SUB_CAT VARCHAR(20) ,STRTGY_CD VARCHAR(20) ,RCVD_FROM_VENDOR VARCHAR(20) ,
+             RCVD_FROM_CONDUIT VARCHAR(20) ,SLS_PERSON_ID VARCHAR(64) ,SYNTHETIC_FG  VARCHAR(1) ,SYNTHETIC_TYPE VARCHAR(20) ,FXRT DECIMAL(25, 8) ,
+             PARENT_CLREFID VARCHAR(64) ,REF_TIME_ID INTEGER ,OPT_CONTRACT_QTY DECIMAL(18, 4) ,OCEAN_PRODUCT_ID BIGINT ,CREATED_BY VARCHAR(64) ,
+             CREATED_DATE TIMESTAMP ,FIRM_ACCT_ID BIGINT ,DEST VARCHAR(20) ,CNTRY_CD VARCHAR(20) ,DW_SINGLE_ORDER_CAT VARCHAR(20) ,CLIENT_ACCT_ID BIGINT ,
+             EXTERNAL_TRDR_ID VARCHAR(64) ,ANONYMOUS_ORDER_FG  VARCHAR(1) ,SYS_SECURITY_ALT_SRC VARCHAR(20) ,CURR_CD VARCHAR(20) ,
+             EVENT_TYPE_CD VARCHAR(20) ,SYS_CLIENT_ACCT_ID VARCHAR(64) ,SYS_FIRM_ACCT_ID VARCHAR(20) ,SYS_TRDR_ID VARCHAR(64) ,DEST_ID INTEGER ,
+             OPT_PUT_OR_CALL VARCHAR(20) ,SRC_FEED_REF_CD VARCHAR(64) ,DIGEST_KEY VARCHAR(128) ,EFF_TS TIMESTAMP ,ENTRY_TS TIMESTAMP ,
+             OPT_STRIKE_PRICE DECIMAL(38, 18) ,OPT_MATURITY_DATE VARCHAR(20) ,ORDER_RESTR VARCHAR(4) ,SHORT_SELL_EXEMPT_CD VARCHAR(4) ,
+             QUOTE_TIME TIMESTAMP ,SLS_CREDIT VARCHAR(20) ,SYS_SECURITY_ID VARCHAR(64) ,SYS_SECURITY_ID_SRC VARCHAR(20) ,SYS_SRC_SYS_ID VARCHAR(20) ,
+             SYS_ORDER_ID_UNIQUE_SUFFIX VARCHAR(20) ,DEST_ID_SRC VARCHAR(4) ,GLB_ROOT_SRC_SYS_ID VARCHAR(20) ,GLB_ROOT_ORDER_ID_SUFFIX VARCHAR(64) ,
+             SYS_ROOT_ORDER_ID_SUFFIX VARCHAR(20) ,SYS_PARENT_ORDER_ID_SUFFIX VARCHAR(20) ,CREDIT_BREACH_PERCENT DECIMAL(25, 10) ,
+             CREDIT_BREACH_OVERRIDE VARCHAR(256) ,INFO_BARRIER_ID VARCHAR(256) ,EXCH_PARTICIPANT_ID VARCHAR(64) ,REJECT_REASON_CD VARCHAR(4) ,
+             DIRECTED_DEST VARCHAR(20) ,REG_NMS_LINK_TYPE VARCHAR(20) ,CONVER_RATIO DECIMAL(18, 9) ,STOCK_REF_PRICE DECIMAL(38, 18) ,
+             CB_SWAP_ORDER_FG  VARCHAR(1) ,EV DECIMAL(38, 18) ,SYS_DATA_MODIFIED_TS TIMESTAMP ,CMSN_TYPE VARCHAR(20) ,SYS_CREDIT_TRDR_ID VARCHAR(20) ,
+             SYS_ENTRY_USER_ID VARCHAR(20) ,OPEN_CLOSE_CD VARCHAR(20) ,AS_OF_TRD_FG  VARCHAR(1) ,HANDLING_INSTR VARCHAR(20) ,SECURITY_DESC VARCHAR(512) ,
+             MINIMUM_QTY DECIMAL(21, 6) ,CUST_OR_FIRM VARCHAR(20) ,MAXIMUM_SHOW DECIMAL(21, 6) ,SECURITY_SUB_TYPE VARCHAR(20) ,MULTILEG_RPT_TYPE VARCHAR(4) ,
+             ORDER_ACTION_TYPE VARCHAR(4) ,BARRIER_STYLE VARCHAR(4) ,AUTO_IOI_REF_TYPE VARCHAR(4) ,PEG_OFFSET_VAL DECIMAL(10, 2) ,AUTO_IOI_OFFSET DECIMAL(28, 12) ,
+             IOI_PRICE DECIMAL(28, 12) ,TGT_PRICE DECIMAL(28, 12) ,IOI_QTY VARCHAR(64) ,IOI_ORDER_QTY DECIMAL(18, 4) ,CMSN VARCHAR(64) ,SYS_LEG_REF_ID VARCHAR(64) ,
+             TRADING_TYPE VARCHAR(4) ,EXCH_ORDER_ID VARCHAR(64) ,DEAL_ID VARCHAR(64) ,ORDER_TRD_TYPE VARCHAR(4) ,CXL_REASON VARCHAR(64))
+             USING ROW OPTIONS (partition_by 'SINGLE_ORDER_DID', buckets '11',redundancy ':redundancy', EVICTION_BY ':evictionByOption', OVERFLOW 'true');
+
+INSERT INTO ORDERS_DETAILS SELECT * FROM staging_orders_details;
+
+-- DROP TABLE IF ALREADY EXISTS --
+DROP TABLE IF EXISTS EXEC_DETAILS;
+DROP TABLE IF EXISTS staging_exec_details;
+
+-- CREATE PARTITIONED ROW TABLE EXEC_DETAILS WITH EVICTION OVERFLOW --
+CREATE EXTERNAL TABLE staging_exec_details USING com.databricks.spark.csv
+             OPTIONS (path ':dataLocation/EXEC_DETAILS.dat', header 'true', inferSchema 'false', nullValue 'NULL');
+
+CREATE TABLE EXEC_DETAILS
+             (EXEC_DID BIGINT,SYS_EXEC_VER INTEGER,SYS_EXEC_ID VARCHAR(64),TRD_DATE VARCHAR(20),ALT_EXEC_ID VARCHAR(64),SYS_EXEC_STAT VARCHAR(20),
+             DW_EXEC_STAT VARCHAR(20),ORDER_OWNER_FIRM_ID VARCHAR(20),TRDR_SYS_LOGON_ID VARCHAR(64),CONTRA_BROKER_MNEMONIC VARCHAR(20),SIDE VARCHAR(20),
+             TICKER_SYMBOL VARCHAR(32),SYS_SECURITY_ALT_ID VARCHAR(64),PRODUCT_CAT_CD VARCHAR(20),LAST_MKT VARCHAR(20),EXECUTED_QTY DECIMAL(18, 4),
+             EXEC_PRICE DECIMAL( 38, 18),EXEC_PRICE_CURR_CD VARCHAR(20),EXEC_CAPACITY VARCHAR(20),CLIENT_ACCT_ID BIGINT,FIRM_ACCT_ID BIGINT,
+             AVG_PRICE_ACCT_ID BIGINT,OCEAN_ACCT_ID BIGINT,EXEC_CNTRY_CD VARCHAR(20),CMSN VARCHAR(20),COMMENT_TXT VARCHAR(2000),
+             ACT_BRCH_SEQ_TXT VARCHAR(20),IGNORE_CD VARCHAR(20),SRC_SYS VARCHAR(20),EXEC_TYPE_CD VARCHAR(20),LIQUIDITY_CD VARCHAR(20),
+             ASK_PRICE DECIMAL( 38, 18),ASK_QTY DECIMAL(18, 4),TRD_REPORT_ASOF_DATE VARCHAR(20),BID_PRICE DECIMAL( 38, 18),BID_QTY DECIMAL(18, 4),
+             CROSS_ID VARCHAR(64),NYSE_SUBREPORT_TYPE VARCHAR(20),QUOTE_COORDINATOR VARCHAR(20),QUOTE_TIME TIMESTAMP,REG_NMS_EXCEPT_CD VARCHAR(20),
+             REG_NMS_EXCEPT_TXT VARCHAR(2000),REG_NMS_LINK_ID VARCHAR(64),REG_NMS_MKT_CENTER_ID VARCHAR(64),REG_NMS_OVERRIDE VARCHAR(20),REG_NMS_PRINTS  VARCHAR(1),
+             EXECUTED_BY VARCHAR(20),TICKER_SYMBOL_SUFFIX VARCHAR(20),PREREGNMS_TRD_MOD1  VARCHAR(1),PREREGNMS_TRD_MOD2  VARCHAR(1),PREREGNMS_TRD_MOD3  VARCHAR(1),
+             PREREGNMS_TRD_MOD4  VARCHAR(1),NMS_FG  VARCHAR(1),GIVEUP_BROKER VARCHAR(20),CHANNEL_NM VARCHAR(128),ORDER_FLOW_ENTRY VARCHAR(20),FLOW_CAT VARCHAR(20),
+             FLOW_CLASS VARCHAR(20),FLOW_TGT VARCHAR(20),ORDER_FLOW_CHANNEL VARCHAR(20),FLOW_SUBCAT VARCHAR(20),SYS_ACCT_ID_SRC VARCHAR(64),STRTGY_CD VARCHAR(20),
+             EXECUTING_BROKER_CD VARCHAR(20),LEAF_EXEC_FG  VARCHAR(1),RCVD_EXEC_ID VARCHAR(64),RCVD_EXEC_VER INTEGER,ORDER_FLOW_DESK VARCHAR(20),
+             SYS_ROOT_ORDER_ID VARCHAR(64),SYS_ROOT_ORDER_VER INTEGER,GLB_ROOT_ORDER_ID VARCHAR(64),TOTAL_EXECUTED_QTY DECIMAL(18, 4),AVG_PRICE DECIMAL( 38, 18),
+             DEST_CD VARCHAR(20),CLIENT_ORDER_REFID VARCHAR(64),CLIENT_ORDER_ORIG_REFID VARCHAR(64),CROSS_EXEC_FG  VARCHAR(1),OCEAN_PRODUCT_ID BIGINT,
+             TRDR_ID BIGINT,REF_TIME_ID INTEGER,CREATED_BY VARCHAR(64),CREATED_DATE TIMESTAMP,FIX_EXEC_ID VARCHAR(64),FIX_ORIGINAL_EXEC_ID VARCHAR(64),
+             RELATED_MKT_CENTER VARCHAR(20),TRANS_TS TIMESTAMP,SYS_SECURITY_ALT_SRC VARCHAR(20),EVENT_TYPE_CD VARCHAR(20),SYS_CLIENT_ACCT_ID VARCHAR(64),
+             SYS_FIRM_ACCT_ID VARCHAR(20),SYS_AVG_PRICE_ACCT_ID VARCHAR(20),SYS_TRDR_ID VARCHAR(64),ACT_BRCH_SEQ VARCHAR(20),SYS_ORDER_ID VARCHAR(64),
+             SYS_ORDER_VER INTEGER,SRC_FEED_REF_CD VARCHAR(64),DIGEST_KEY VARCHAR(128),TRUE_LAST_MKT VARCHAR(20),ENTRY_TS TIMESTAMP,OPT_STRIKE_PRICE DECIMAL( 38, 18),
+             OPT_MATURITY_DATE VARCHAR(20),EXPIRE_TS TIMESTAMP,OPT_PUT_OR_CALL VARCHAR(20),SYS_ORDER_STAT_CD VARCHAR(20),CONTRA_ACCT VARCHAR(64),CONTRA_ACCT_SRC VARCHAR(20),
+             CONTRA_BROKER_SRC VARCHAR(20),SYS_SECURITY_ID VARCHAR(64),SYS_SECURITY_ID_SRC VARCHAR(20),SYS_SRC_SYS_ID VARCHAR(20),SYS_ORDER_ID_UNIQUE_SUFFIX VARCHAR(20),
+             DEST VARCHAR(20),DEST_ID_SRC VARCHAR(4),CONVER_RATIO DECIMAL(18, 9),STOCK_REF_PRICE DECIMAL( 38, 18),AS_OF_TRD_FG  VARCHAR(1),MULTILEG_RPT_TYPE VARCHAR(4),
+             REG_NMS_LINK_TYPE VARCHAR(20),EXEC_SUB_TYPE VARCHAR(4),CMSN_TYPE VARCHAR(20),QUOTE_CONDITION_IND VARCHAR(20),TRD_THROUGH_FG  VARCHAR(1),
+             REGNMS_ORDER_LINK_ID VARCHAR(64),REGNMS_ORDER_LINK_TYPE VARCHAR(20),DK_IND VARCHAR(20),NBBO_QUOTE_TIME VARCHAR(20),GLB_ROOT_SRC_SYS_ID VARCHAR(20),
+             TRD_REPORT_TYPE VARCHAR(20),REPORT_TO_EXCH_FG VARCHAR(1),CMPLN_COMMENT VARCHAR(256),DEAL_TYPE VARCHAR(4),EXEC_COMMENTS VARCHAR(256),
+             OPTAL_FIELDS VARCHAR(120),SPOT_REF_PRICE VARCHAR(20),DELTA_OVERRIDE VARCHAR(20),UNDERLYING_PRICE VARCHAR(20),PRICE_DELTA VARCHAR(20),
+             NORMALIZED_LIQUIDITY_IND VARCHAR(4),USER_AVG_PRICE VARCHAR(20),LAST_EXEC_TS TIMESTAMP,LULD_LOWER_PRICE_BAND VARCHAR(20),LULD_UPPER_PRICE_BAND VARCHAR(20),
+             LULD_PRICE_BAND_TS TIMESTAMP,REMNG_QTY DECIMAL(18, 4),ORDER_QTY DECIMAL(18, 4),AMD_TS TIMESTAMP,SETL_CODE VARCHAR(50),SETL_DATE VARCHAR(20),
+             CUST_NM VARCHAR(50),EXEC_TYPE VARCHAR(50),TRDR_KEY VARCHAR(50),TRDR_NM VARCHAR(50),FX_RATE VARCHAR(50),CUST_FX_RATE VARCHAR(50),
+             PARENT_ORDER_SYS_NM VARCHAR(10),CNC_TYPE VARCHAR(50),FEE_AMT DECIMAL(20, 2),FEE_CCY VARCHAR(10),BRKG_AMT DECIMAL(20, 2),BRKG_CCY VARCHAR(10),
+             CLEAR VARCHAR(50),PMT_FIX_DATE VARCHAR(20),FOLLOW_ON_FG  VARCHAR(1),FX_RATE_CCY_TO VARCHAR(10),FX_RATE_CCY_FROM VARCHAR(10),CUST_FX_RATE_CCY_TO VARCHAR(10),
+             CUST_FX_RATE_CCY_FROM VARCHAR(10),SYS_GFCID VARCHAR(20),CONTRA_SIDE VARCHAR(20),OPT_CONTRACT_MULTIPLIER DECIMAL(10, 2),PRIOR_REF_PRICE_TS TIMESTAMP,
+             SECURITY_SUB_TYPE VARCHAR(20),MSG_DIRECTION VARCHAR(20),LEAF_SYS_EXEC_ID VARCHAR(64),LEAF_SRC_SYS VARCHAR(20),FIX_LAST_MKT VARCHAR(20),
+             FIX_CONTRA_BROKER_MNEMONIC VARCHAR(20),RIO_MSG_SRC VARCHAR(64),SNAPSHOT_TS TIMESTAMP,EXTERNAL_TRANS_TS TIMESTAMP,PRICE_CATEGORY VARCHAR(32),
+             UNDERLYING_FX_RATE DECIMAL(36, 8),CONVERSION_RATE DECIMAL(36, 8),TRANS_COMMENT VARCHAR(256),AGGRESSOR_FLAG VARCHAR(1))
+             USING ROW OPTIONS (partition_by 'EXEC_DID', buckets '11', redundancy ':redundancy', EVICTION_BY ':evictionByOption', OVERFLOW 'true') ;
+
+INSERT INTO EXEC_DETAILS SELECT * FROM staging_exec_details;
+
diff --git a/dtests/src/resources/scripts/ct/ct_create_and_load_persistentEvictionOverflow_row_table.sql b/dtests/src/resources/scripts/ct/ct_create_and_load_persistentEvictionOverflow_row_table.sql
new file mode 100644
index 0000000000..13a8dbc865
--- /dev/null
+++ b/dtests/src/resources/scripts/ct/ct_create_and_load_persistentEvictionOverflow_row_table.sql
@@ -0,0 +1,111 @@
+DROP DISKSTORE IF EXISTS OverflowDiskStore;
+CREATE DISKSTORE OverflowDiskStore;
+
+-- DROP TABLE IF ALREADY EXISTS --
+DROP TABLE IF EXISTS ORDERS_DETAILS;
+DROP TABLE IF EXISTS staging_orders_details;
+
+-- CREATE PERSISTENT ROW TABLE ORDERS_DETAILS WITH EVICTION OVERFLOW --
+CREATE EXTERNAL TABLE staging_orders_details USING com.databricks.spark.csv
+             OPTIONS (path ':dataLocation/ORDERS_DETAILS.dat', header 'true', inferSchema 'false',
+              nullValue 'NULL');
+
+CREATE TABLE ORDERS_DETAILS
+             (SINGLE_ORDER_DID BIGINT ,SYS_ORDER_ID VARCHAR(64) ,SYS_ORDER_VER INTEGER ,DATA_SNDG_SYS_NM VARCHAR(128) ,
+             SRC_SYS VARCHAR(20) ,SYS_PARENT_ORDER_ID VARCHAR(64) ,SYS_PARENT_ORDER_VER SMALLINT ,PARENT_ORDER_TRD_DATE VARCHAR(20),
+             PARENT_ORDER_SYS_NM VARCHAR(128) ,SYS_ALT_ORDER_ID VARCHAR(64) ,TRD_DATE VARCHAR(20),GIVE_UP_BROKER VARCHAR(20) ,
+             EVENT_RCV_TS TIMESTAMP ,SYS_ROOT_ORDER_ID VARCHAR(64) ,GLB_ROOT_ORDER_ID VARCHAR(64) ,GLB_ROOT_ORDER_SYS_NM VARCHAR(128) ,
+             GLB_ROOT_ORDER_RCV_TS TIMESTAMP ,SYS_ORDER_STAT_CD VARCHAR(20) ,SYS_ORDER_STAT_DESC_TXT VARCHAR(120) ,DW_STAT_CD VARCHAR(20) ,
+             EVENT_TS TIMESTAMP,ORDER_OWNER_FIRM_ID VARCHAR(20),RCVD_ORDER_ID VARCHAR(64) ,EVENT_INITIATOR_ID VARCHAR(64),
+             TRDR_SYS_LOGON_ID VARCHAR(64),SOLICITED_FG  VARCHAR(1),RCVD_FROM_FIRMID_CD VARCHAR(20),RCV_DESK VARCHAR(20),
+             SYS_ACCT_ID_SRC VARCHAR(64) ,CUST_ACCT_MNEMONIC VARCHAR(128),CUST_SLANG VARCHAR(20) ,SYS_ACCT_TYPE VARCHAR(20) ,
+             CUST_EXCH_ACCT_ID VARCHAR(64) ,SYS_SECURITY_ALT_ID VARCHAR(64) ,TICKER_SYMBOL VARCHAR(32) ,TICKER_SYMBOL_SUFFIX VARCHAR(20) ,
+             PRODUCT_CAT_CD VARCHAR(20) ,SIDE VARCHAR(20) ,LIMIT_PRICE DECIMAL(38, 18),STOP_PRICE DECIMAL(38, 18),ORDER_QTY DECIMAL(18, 4) ,
+             TOTAL_EXECUTED_QTY DECIMAL(18, 4) ,AVG_PRICE DECIMAL(38, 18) ,DAY_EXECUTED_QTY DECIMAL(18, 4) ,DAY_AVG_PRICE DECIMAL(38, 18) ,
+             REMNG_QTY DECIMAL(18, 4) ,CNCL_QTY DECIMAL(18, 4) ,CNCL_BY_FG  VARCHAR(1) ,EXPIRE_TS TIMESTAMP ,EXEC_INSTR VARCHAR(64) ,
+             TIME_IN_FORCE VARCHAR(20) ,RULE80AF  VARCHAR(1) ,DEST_FIRMID_CD VARCHAR(20) ,SENT_TO_CONDUIT VARCHAR(20) ,SENT_TO_MPID VARCHAR(20) ,
+             RCV_METHOD_CD VARCHAR(20) ,LIMIT_ORDER_DISP_IND  VARCHAR(1) ,MERGED_ORDER_FG  VARCHAR(1) ,MERGED_TO_ORDER_ID VARCHAR(64) ,
+             RCV_DEPT_ID VARCHAR(20) ,ROUTE_METHOD_CD VARCHAR(20) ,LOCATE_ID VARCHAR(256) ,LOCATE_TS TIMESTAMP ,LOCATE_OVERRIDE_REASON VARCHAR(2000) ,
+             LOCATE_BROKER VARCHAR(256) ,ORDER_BRCH_SEQ_TXT VARCHAR(20) ,IGNORE_CD VARCHAR(20) ,CLIENT_ORDER_REFID VARCHAR(64) ,
+             CLIENT_ORDER_ORIG_REFID VARCHAR(64) ,ORDER_TYPE_CD VARCHAR(20) ,SENT_TO_ORDER_ID VARCHAR(64) ,ASK_PRICE DECIMAL(38, 18) ,
+             ASK_QTY DECIMAL(18, 4) ,BID_PRICE DECIMAL(38, 18) ,BID_QTY DECIMAL(18, 4) ,REG_NMS_EXCEP_CD VARCHAR(20) ,REG_NMS_EXCEP_TXT VARCHAR(2000) ,
+             REG_NMS_LINK_ID VARCHAR(64) ,REG_NMS_PRINTS  VARCHAR(1) ,REG_NMS_STOP_TIME TIMESTAMP ,SENT_TS TIMESTAMP ,RULE92  VARCHAR(1) ,
+             RULE92_OVERRIDE_TXT VARCHAR(2000) ,RULE92_RATIO DECIMAL(25, 10) ,EXMPT_STGY_BEGIN_TIME TIMESTAMP ,EXMPT_STGY_END_TIME TIMESTAMP ,
+             EXMPT_STGY_PRICE_INST VARCHAR(2000) ,EXMPT_STGY_QTY DECIMAL(18, 4) ,CAPACITY VARCHAR(20) ,DISCRETION_QTY DECIMAL(18, 4) ,
+             DISCRETION_PRICE VARCHAR(64) ,BRCHID_CD VARCHAR(20) ,BASKET_ORDER_ID VARCHAR(64) ,PT_STRTGY_CD VARCHAR(20) ,
+             SETL_DATE VARCHAR(20),SETL_TYPE VARCHAR(20) ,SETL_CURR_CD VARCHAR(20) ,SETL_INSTRS VARCHAR(2000) ,COMMENT_TXT VARCHAR(2000) ,
+             CHANNEL_NM VARCHAR(128) ,FLOW_CAT VARCHAR(20) ,FLOW_CLASS VARCHAR(20) ,FLOW_TGT VARCHAR(20) ,ORDER_FLOW_ENTRY VARCHAR(20) ,
+             ORDER_FLOW_CHANNEL VARCHAR(20) ,ORDER_FLOW_DESK VARCHAR(20) ,FLOW_SUB_CAT VARCHAR(20) ,STRTGY_CD VARCHAR(20) ,RCVD_FROM_VENDOR VARCHAR(20) ,
+             RCVD_FROM_CONDUIT VARCHAR(20) ,SLS_PERSON_ID VARCHAR(64) ,SYNTHETIC_FG  VARCHAR(1) ,SYNTHETIC_TYPE VARCHAR(20) ,FXRT DECIMAL(25, 8) ,
+             PARENT_CLREFID VARCHAR(64) ,REF_TIME_ID INTEGER ,OPT_CONTRACT_QTY DECIMAL(18, 4) ,OCEAN_PRODUCT_ID BIGINT ,CREATED_BY VARCHAR(64) ,
+             CREATED_DATE TIMESTAMP ,FIRM_ACCT_ID BIGINT ,DEST VARCHAR(20) ,CNTRY_CD VARCHAR(20) ,DW_SINGLE_ORDER_CAT VARCHAR(20) ,CLIENT_ACCT_ID BIGINT ,
+             EXTERNAL_TRDR_ID VARCHAR(64) ,ANONYMOUS_ORDER_FG  VARCHAR(1) ,SYS_SECURITY_ALT_SRC VARCHAR(20) ,CURR_CD VARCHAR(20) ,
+             EVENT_TYPE_CD VARCHAR(20) ,SYS_CLIENT_ACCT_ID VARCHAR(64) ,SYS_FIRM_ACCT_ID VARCHAR(20) ,SYS_TRDR_ID VARCHAR(64) ,DEST_ID INTEGER ,
+             OPT_PUT_OR_CALL VARCHAR(20) ,SRC_FEED_REF_CD VARCHAR(64) ,DIGEST_KEY VARCHAR(128) ,EFF_TS TIMESTAMP ,ENTRY_TS TIMESTAMP ,
+             OPT_STRIKE_PRICE DECIMAL(38, 18) ,OPT_MATURITY_DATE VARCHAR(20) ,ORDER_RESTR VARCHAR(4) ,SHORT_SELL_EXEMPT_CD VARCHAR(4) ,
+             QUOTE_TIME TIMESTAMP ,SLS_CREDIT VARCHAR(20) ,SYS_SECURITY_ID VARCHAR(64) ,SYS_SECURITY_ID_SRC VARCHAR(20) ,SYS_SRC_SYS_ID VARCHAR(20) ,
+             SYS_ORDER_ID_UNIQUE_SUFFIX VARCHAR(20) ,DEST_ID_SRC VARCHAR(4) ,GLB_ROOT_SRC_SYS_ID VARCHAR(20) ,GLB_ROOT_ORDER_ID_SUFFIX VARCHAR(64) ,
+             SYS_ROOT_ORDER_ID_SUFFIX VARCHAR(20) ,SYS_PARENT_ORDER_ID_SUFFIX VARCHAR(20) ,CREDIT_BREACH_PERCENT DECIMAL(25, 10) ,
+             CREDIT_BREACH_OVERRIDE VARCHAR(256) ,INFO_BARRIER_ID VARCHAR(256) ,EXCH_PARTICIPANT_ID VARCHAR(64) ,REJECT_REASON_CD VARCHAR(4) ,
+             DIRECTED_DEST VARCHAR(20) ,REG_NMS_LINK_TYPE VARCHAR(20) ,CONVER_RATIO DECIMAL(18, 9) ,STOCK_REF_PRICE DECIMAL(38, 18) ,
+             CB_SWAP_ORDER_FG  VARCHAR(1) ,EV DECIMAL(38, 18) ,SYS_DATA_MODIFIED_TS TIMESTAMP ,CMSN_TYPE VARCHAR(20) ,SYS_CREDIT_TRDR_ID VARCHAR(20) ,
+             SYS_ENTRY_USER_ID VARCHAR(20) ,OPEN_CLOSE_CD VARCHAR(20) ,AS_OF_TRD_FG  VARCHAR(1) ,HANDLING_INSTR VARCHAR(20) ,SECURITY_DESC VARCHAR(512) ,
+             MINIMUM_QTY DECIMAL(21, 6) ,CUST_OR_FIRM VARCHAR(20) ,MAXIMUM_SHOW DECIMAL(21, 6) ,SECURITY_SUB_TYPE VARCHAR(20) ,MULTILEG_RPT_TYPE VARCHAR(4) ,
+             ORDER_ACTION_TYPE VARCHAR(4) ,BARRIER_STYLE VARCHAR(4) ,AUTO_IOI_REF_TYPE VARCHAR(4) ,PEG_OFFSET_VAL DECIMAL(10, 2) ,AUTO_IOI_OFFSET DECIMAL(28, 12) ,
+             IOI_PRICE DECIMAL(28, 12) ,TGT_PRICE DECIMAL(28, 12) ,IOI_QTY VARCHAR(64) ,IOI_ORDER_QTY DECIMAL(18, 4) ,CMSN VARCHAR(64) ,SYS_LEG_REF_ID VARCHAR(64) ,
+             TRADING_TYPE VARCHAR(4) ,EXCH_ORDER_ID VARCHAR(64) ,DEAL_ID VARCHAR(64) ,ORDER_TRD_TYPE VARCHAR(4) ,CXL_REASON VARCHAR(64))
+             USING ROW OPTIONS (partition_by 'SINGLE_ORDER_DID', buckets '11', PERSISTENT ':persistenceMode',
+              redundancy ':redundancy', EVICTION_BY ':evictionByOption', OVERFLOW 'true', diskstore 'OverflowDiskStore');
+
+INSERT INTO ORDERS_DETAILS SELECT * FROM staging_orders_details;
+
+-- DROP TABLE IF ALREADY EXISTS --
+DROP TABLE IF EXISTS EXEC_DETAILS;
+DROP TABLE IF EXISTS staging_exec_details;
+
+-- CREATE PERSISTENT ROW TABLE EXEC_DETAILS WITH EVICTION OVERFLOW --
+CREATE EXTERNAL TABLE staging_exec_details USING com.databricks.spark.csv
+             OPTIONS (path ':dataLocation/EXEC_DETAILS.dat', header 'true', inferSchema 'false', nullValue 'NULL');
+
+CREATE TABLE EXEC_DETAILS
+             (EXEC_DID BIGINT,SYS_EXEC_VER INTEGER,SYS_EXEC_ID VARCHAR(64),TRD_DATE VARCHAR(20),ALT_EXEC_ID VARCHAR(64),SYS_EXEC_STAT VARCHAR(20),
+             DW_EXEC_STAT VARCHAR(20),ORDER_OWNER_FIRM_ID VARCHAR(20),TRDR_SYS_LOGON_ID VARCHAR(64),CONTRA_BROKER_MNEMONIC VARCHAR(20),SIDE VARCHAR(20),
+             TICKER_SYMBOL VARCHAR(32),SYS_SECURITY_ALT_ID VARCHAR(64),PRODUCT_CAT_CD VARCHAR(20),LAST_MKT VARCHAR(20),EXECUTED_QTY DECIMAL(18, 4),
+             EXEC_PRICE DECIMAL( 38, 18),EXEC_PRICE_CURR_CD VARCHAR(20),EXEC_CAPACITY VARCHAR(20),CLIENT_ACCT_ID BIGINT,FIRM_ACCT_ID BIGINT,
+             AVG_PRICE_ACCT_ID BIGINT,OCEAN_ACCT_ID BIGINT,EXEC_CNTRY_CD VARCHAR(20),CMSN VARCHAR(20),COMMENT_TXT VARCHAR(2000),
+             ACT_BRCH_SEQ_TXT VARCHAR(20),IGNORE_CD VARCHAR(20),SRC_SYS VARCHAR(20),EXEC_TYPE_CD VARCHAR(20),LIQUIDITY_CD VARCHAR(20),
+             ASK_PRICE DECIMAL( 38, 18),ASK_QTY DECIMAL(18, 4),TRD_REPORT_ASOF_DATE VARCHAR(20),BID_PRICE DECIMAL( 38, 18),BID_QTY DECIMAL(18, 4),
+             CROSS_ID VARCHAR(64),NYSE_SUBREPORT_TYPE VARCHAR(20),QUOTE_COORDINATOR VARCHAR(20),QUOTE_TIME TIMESTAMP,REG_NMS_EXCEPT_CD VARCHAR(20),
+             REG_NMS_EXCEPT_TXT VARCHAR(2000),REG_NMS_LINK_ID VARCHAR(64),REG_NMS_MKT_CENTER_ID VARCHAR(64),REG_NMS_OVERRIDE VARCHAR(20),REG_NMS_PRINTS  VARCHAR(1),
+             EXECUTED_BY VARCHAR(20),TICKER_SYMBOL_SUFFIX VARCHAR(20),PREREGNMS_TRD_MOD1  VARCHAR(1),PREREGNMS_TRD_MOD2  VARCHAR(1),PREREGNMS_TRD_MOD3  VARCHAR(1),
+             PREREGNMS_TRD_MOD4  VARCHAR(1),NMS_FG  VARCHAR(1),GIVEUP_BROKER VARCHAR(20),CHANNEL_NM VARCHAR(128),ORDER_FLOW_ENTRY VARCHAR(20),FLOW_CAT VARCHAR(20),
+             FLOW_CLASS VARCHAR(20),FLOW_TGT VARCHAR(20),ORDER_FLOW_CHANNEL VARCHAR(20),FLOW_SUBCAT VARCHAR(20),SYS_ACCT_ID_SRC VARCHAR(64),STRTGY_CD VARCHAR(20),
+             EXECUTING_BROKER_CD VARCHAR(20),LEAF_EXEC_FG  VARCHAR(1),RCVD_EXEC_ID VARCHAR(64),RCVD_EXEC_VER INTEGER,ORDER_FLOW_DESK VARCHAR(20),
+             SYS_ROOT_ORDER_ID VARCHAR(64),SYS_ROOT_ORDER_VER INTEGER,GLB_ROOT_ORDER_ID VARCHAR(64),TOTAL_EXECUTED_QTY DECIMAL(18, 4),AVG_PRICE DECIMAL( 38, 18),
+             DEST_CD VARCHAR(20),CLIENT_ORDER_REFID VARCHAR(64),CLIENT_ORDER_ORIG_REFID VARCHAR(64),CROSS_EXEC_FG  VARCHAR(1),OCEAN_PRODUCT_ID BIGINT,
+             TRDR_ID BIGINT,REF_TIME_ID INTEGER,CREATED_BY VARCHAR(64),CREATED_DATE TIMESTAMP,FIX_EXEC_ID VARCHAR(64),FIX_ORIGINAL_EXEC_ID VARCHAR(64),
+             RELATED_MKT_CENTER VARCHAR(20),TRANS_TS TIMESTAMP,SYS_SECURITY_ALT_SRC VARCHAR(20),EVENT_TYPE_CD VARCHAR(20),SYS_CLIENT_ACCT_ID VARCHAR(64),
+             SYS_FIRM_ACCT_ID VARCHAR(20),SYS_AVG_PRICE_ACCT_ID VARCHAR(20),SYS_TRDR_ID VARCHAR(64),ACT_BRCH_SEQ VARCHAR(20),SYS_ORDER_ID VARCHAR(64),
+             SYS_ORDER_VER INTEGER,SRC_FEED_REF_CD VARCHAR(64),DIGEST_KEY VARCHAR(128),TRUE_LAST_MKT VARCHAR(20),ENTRY_TS TIMESTAMP,OPT_STRIKE_PRICE DECIMAL( 38, 18),
+             OPT_MATURITY_DATE VARCHAR(20),EXPIRE_TS TIMESTAMP,OPT_PUT_OR_CALL VARCHAR(20),SYS_ORDER_STAT_CD VARCHAR(20),CONTRA_ACCT VARCHAR(64),CONTRA_ACCT_SRC VARCHAR(20),
+             CONTRA_BROKER_SRC VARCHAR(20),SYS_SECURITY_ID VARCHAR(64),SYS_SECURITY_ID_SRC VARCHAR(20),SYS_SRC_SYS_ID VARCHAR(20),SYS_ORDER_ID_UNIQUE_SUFFIX VARCHAR(20),
+             DEST VARCHAR(20),DEST_ID_SRC VARCHAR(4),CONVER_RATIO DECIMAL(18, 9),STOCK_REF_PRICE DECIMAL( 38, 18),AS_OF_TRD_FG  VARCHAR(1),MULTILEG_RPT_TYPE VARCHAR(4),
+             REG_NMS_LINK_TYPE VARCHAR(20),EXEC_SUB_TYPE VARCHAR(4),CMSN_TYPE VARCHAR(20),QUOTE_CONDITION_IND VARCHAR(20),TRD_THROUGH_FG  VARCHAR(1),
+             REGNMS_ORDER_LINK_ID VARCHAR(64),REGNMS_ORDER_LINK_TYPE VARCHAR(20),DK_IND VARCHAR(20),NBBO_QUOTE_TIME VARCHAR(20),GLB_ROOT_SRC_SYS_ID VARCHAR(20),
+             TRD_REPORT_TYPE VARCHAR(20),REPORT_TO_EXCH_FG VARCHAR(1),CMPLN_COMMENT VARCHAR(256),DEAL_TYPE VARCHAR(4),EXEC_COMMENTS VARCHAR(256),
+             OPTAL_FIELDS VARCHAR(120),SPOT_REF_PRICE VARCHAR(20),DELTA_OVERRIDE VARCHAR(20),UNDERLYING_PRICE VARCHAR(20),PRICE_DELTA VARCHAR(20),
+             NORMALIZED_LIQUIDITY_IND VARCHAR(4),USER_AVG_PRICE VARCHAR(20),LAST_EXEC_TS TIMESTAMP,LULD_LOWER_PRICE_BAND VARCHAR(20),LULD_UPPER_PRICE_BAND VARCHAR(20),
+             LULD_PRICE_BAND_TS TIMESTAMP,REMNG_QTY DECIMAL(18, 4),ORDER_QTY DECIMAL(18, 4),AMD_TS TIMESTAMP,SETL_CODE VARCHAR(50),SETL_DATE VARCHAR(20),
+             CUST_NM VARCHAR(50),EXEC_TYPE VARCHAR(50),TRDR_KEY VARCHAR(50),TRDR_NM VARCHAR(50),FX_RATE VARCHAR(50),CUST_FX_RATE VARCHAR(50),
+             PARENT_ORDER_SYS_NM VARCHAR(10),CNC_TYPE VARCHAR(50),FEE_AMT DECIMAL(20, 2),FEE_CCY VARCHAR(10),BRKG_AMT DECIMAL(20, 2),BRKG_CCY VARCHAR(10),
+             CLEAR VARCHAR(50),PMT_FIX_DATE VARCHAR(20),FOLLOW_ON_FG  VARCHAR(1),FX_RATE_CCY_TO VARCHAR(10),FX_RATE_CCY_FROM VARCHAR(10),CUST_FX_RATE_CCY_TO VARCHAR(10),
+             CUST_FX_RATE_CCY_FROM VARCHAR(10),SYS_GFCID VARCHAR(20),CONTRA_SIDE VARCHAR(20),OPT_CONTRACT_MULTIPLIER DECIMAL(10, 2),PRIOR_REF_PRICE_TS TIMESTAMP,
+             SECURITY_SUB_TYPE VARCHAR(20),MSG_DIRECTION VARCHAR(20),LEAF_SYS_EXEC_ID VARCHAR(64),LEAF_SRC_SYS VARCHAR(20),FIX_LAST_MKT VARCHAR(20),
+             FIX_CONTRA_BROKER_MNEMONIC VARCHAR(20),RIO_MSG_SRC VARCHAR(64),SNAPSHOT_TS TIMESTAMP,EXTERNAL_TRANS_TS TIMESTAMP,PRICE_CATEGORY VARCHAR(32),
+             UNDERLYING_FX_RATE DECIMAL(36, 8),CONVERSION_RATE DECIMAL(36, 8),TRANS_COMMENT VARCHAR(256),AGGRESSOR_FLAG VARCHAR(1))
+             USING ROW OPTIONS (partition_by 'EXEC_DID', buckets '11', PERSISTENT ':persistenceMode',
+             redundancy ':redundancy', EVICTION_BY ':evictionByOption', OVERFLOW 'true', diskstore  'OverflowDiskStore');
+
+INSERT INTO EXEC_DETAILS SELECT * FROM staging_exec_details;
+
+
diff --git a/dtests/src/resources/scripts/ct/ct_queries.sql b/dtests/src/resources/scripts/ct/ct_queries.sql
index 7f39a38faa..145af8747a 100644
--- a/dtests/src/resources/scripts/ct/ct_queries.sql
+++ b/dtests/src/resources/scripts/ct/ct_queries.sql
@@ -68,56 +68,56 @@ select event_type_cd,event_rcv_ts,event_ts,sent_ts from ORDERS_DETAILS
 
 select count(*) from EXEC_DETAILS a LEFT JOIN ORDERS_DETAILS b using(sys_root_order_id);
 
-(select TRD_DATE, ROOT_FLOW_CAT, sum(Notional) as notional, count(*) as trades, sum(shares) as shares
-  from (select execs.sys_order_id, execs.EXECUTED_QTY * execs.EXEC_PRICE as notional, execs.EXECUTED_QTY as shares,
-    execs.TRD_DATE, case when coalesce(root_exec.flow_cat,root.flow_cat) is null then 'UNKNOWN'
-    else coalesce(root_exec.flow_cat,root.flow_cat) end as ROOT_FLOW_CAT
-    from EXEC_DETAILS as execs
-    left join
-      (select distinct TRD_DATE,glb_root_order_id,flow_cat from EXEC_DETAILS
-        where TRD_DATE in ('20160325','20160413' )
-        and (PRODUCT_CAT_CD is null or PRODUCT_CAT_CD not in ('OPT','FUT','MLEG'))
-        and (exec_price_curr_cd = 'USD' OR exec_price_curr_cd is null)
-        and sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137')
-        and sys_order_id = glb_root_order_id and sys_src_sys_id = glb_root_src_sys_id )
-    root_exec on execs.trd_date=root_exec.trd_date and execs.glb_root_order_id=root_exec.glb_root_order_id
-    left join
-      (select distinct TRD_DATE, glb_root_order_id,flow_cat from ORDERS_DETAILS T
-        where T.sys_order_id = T.glb_root_order_id and T.sys_src_sys_id = T.glb_root_src_sys_id
-        and T.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137')
-        and T.TRD_DATE in ('20160325','20160413' ) and (T.CURR_CD = 'USD' or T.CURR_CD is null)
-        and (T.PRODUCT_CAT_CD is null or T.PRODUCT_CAT_CD not in ('OPT', 'FUT','MLEG')) )
-    root on execs.trd_date=root.trd_date and execs.glb_root_order_id=root.glb_root_order_id
-      where execs.LEAF_EXEC_FG = 'Y' and execs.event_type_cd = 'FILLED_CONF'
-      and execs.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137')
-      and execs.SYS_ORDER_STAT_CD in ('2','1') and execs.TRD_DATE in ('20160325','20160413' )
-      and (execs.PRODUCT_CAT_CD is null or execs.PRODUCT_CAT_CD not in ('OPT', 'FUT','MLEG'))
-      and (execs.exec_price_curr_cd = 'USD' or execs.exec_price_curr_cd = null) )
-    Aggregated group by TRD_DATE, ROOT_FLOW_CAT order by TRD_DATE )
-union all
-  (select TRD_DATE, ROOT_FLOW_CAT, sum(Notional) as notional, count(*) as trades, sum (shares) as shares
-    from (select execs.sys_order_id, execs.EXECUTED_QTY * execs.EXEC_PRICE as notional,
-    execs.EXECUTED_QTY as shares, execs.TRD_DATE, 'ALL' as ROOT_FLOW_CAT from EXEC_DETAILS as execs
-    left join
-    (select distinct TRD_DATE,glb_root_order_id,flow_cat from EXEC_DETAILS
-        where TRD_DATE in ('20160325','20160413' )
-        and (PRODUCT_CAT_CD is null or PRODUCT_CAT_CD not in ('OPT','FUT','MLEG'))
-        and (exec_price_curr_cd = 'USD' OR exec_price_curr_cd is null)
-        and sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137')
-        and sys_order_id = glb_root_order_id and sys_src_sys_id = glb_root_src_sys_id )
-        root_exec on execs.trd_date=root_exec.trd_date and execs.glb_root_order_id=root_exec.glb_root_order_id
-      left join
-      (select distinct TRD_DATE, glb_root_order_id,flow_cat from ORDERS_DETAILS T
-         where T.sys_order_id = T.glb_root_order_id and T.sys_src_sys_id = T.glb_root_src_sys_id
-         and T.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137')
-         and T.TRD_DATE in ('20160325','20160413') and (T.CURR_CD = 'USD' or T.CURR_CD is null)
-         and (T.PRODUCT_CAT_CD is null or T.PRODUCT_CAT_CD not in ('OPT', 'FUT','MLEG')) )
-      root on execs.trd_date=root.trd_date and execs.glb_root_order_id=root.glb_root_order_id
-      where execs.LEAF_EXEC_FG = 'Y' and execs.event_type_cd = 'FILLED_CONF'
-        and execs.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137')
-        and execs.SYS_ORDER_STAT_CD in ('2','1') and execs.TRD_DATE in ('20160325','20160413' )
-        and (execs.PRODUCT_CAT_CD is null or execs.PRODUCT_CAT_CD not in ('OPT', 'FUT','MLEG'))
-        and (execs.exec_price_curr_cd = 'USD' or execs.exec_price_curr_cd = null) )
-    Aggregated group by TRD_DATE, ROOT_FLOW_CAT order by TRD_DATE );
-
-    select distinct FLOW_CLASS from ORDERS_DETAILS;
+-- (select TRD_DATE, ROOT_FLOW_CAT, sum(Notional) as notional, count(*) as trades, sum(shares) as shares
+--   from (select execs.sys_order_id, execs.EXECUTED_QTY * execs.EXEC_PRICE as notional, execs.EXECUTED_QTY as shares,
+--     execs.TRD_DATE, case when coalesce(root_exec.flow_cat,root.flow_cat) is null then 'UNKNOWN'
+--    else coalesce(root_exec.flow_cat,root.flow_cat) end as ROOT_FLOW_CAT
+--    from EXEC_DETAILS as execs
+--    left join
+--      (select distinct TRD_DATE,glb_root_order_id,flow_cat from EXEC_DETAILS
+--        where TRD_DATE in ('20160325','20160413' )
+--        and (PRODUCT_CAT_CD is null or PRODUCT_CAT_CD not in ('OPT','FUT','MLEG'))
+--        and (exec_price_curr_cd = 'USD' OR exec_price_curr_cd is null)
+--        and sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137')
+--        and sys_order_id = glb_root_order_id and sys_src_sys_id = glb_root_src_sys_id )
+--    root_exec on execs.trd_date=root_exec.trd_date and execs.glb_root_order_id=root_exec.glb_root_order_id
+--    left join
+--      (select distinct TRD_DATE, glb_root_order_id,flow_cat from ORDERS_DETAILS T
+--        where T.sys_order_id = T.glb_root_order_id and T.sys_src_sys_id = T.glb_root_src_sys_id
+--        and T.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137')
+--        and T.TRD_DATE in ('20160325','20160413' ) and (T.CURR_CD = 'USD' or T.CURR_CD is null)
+--        and (T.PRODUCT_CAT_CD is null or T.PRODUCT_CAT_CD not in ('OPT', 'FUT','MLEG')) )
+--    root on execs.trd_date=root.trd_date and execs.glb_root_order_id=root.glb_root_order_id
+--      where execs.LEAF_EXEC_FG = 'Y' and execs.event_type_cd = 'FILLED_CONF'
+--      and execs.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137')
+--      and execs.SYS_ORDER_STAT_CD in ('2','1') and execs.TRD_DATE in ('20160325','20160413' )
+--      and (execs.PRODUCT_CAT_CD is null or execs.PRODUCT_CAT_CD not in ('OPT', 'FUT','MLEG'))
+--      and (execs.exec_price_curr_cd = 'USD' or execs.exec_price_curr_cd = null) )
+--    Aggregated group by TRD_DATE, ROOT_FLOW_CAT order by TRD_DATE )
+--union all
+--  (select TRD_DATE, ROOT_FLOW_CAT, sum(Notional) as notional, count(*) as trades, sum (shares) as shares
+--    from (select execs.sys_order_id, execs.EXECUTED_QTY * execs.EXEC_PRICE as notional,
+--    execs.EXECUTED_QTY as shares, execs.TRD_DATE, 'ALL' as ROOT_FLOW_CAT from EXEC_DETAILS as execs
+--    left join
+--    (select distinct TRD_DATE,glb_root_order_id,flow_cat from EXEC_DETAILS
+--        where TRD_DATE in ('20160325','20160413' )
+--        and (PRODUCT_CAT_CD is null or PRODUCT_CAT_CD not in ('OPT','FUT','MLEG'))
+--        and (exec_price_curr_cd = 'USD' OR exec_price_curr_cd is null)
+--        and sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137')
+--        and sys_order_id = glb_root_order_id and sys_src_sys_id = glb_root_src_sys_id )
+--        root_exec on execs.trd_date=root_exec.trd_date and execs.glb_root_order_id=root_exec.glb_root_order_id
+--      left join
+--      (select distinct TRD_DATE, glb_root_order_id,flow_cat from ORDERS_DETAILS T
+--         where T.sys_order_id = T.glb_root_order_id and T.sys_src_sys_id = T.glb_root_src_sys_id
+--         and T.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137')
+--         and T.TRD_DATE in ('20160325','20160413') and (T.CURR_CD = 'USD' or T.CURR_CD is null)
+--         and (T.PRODUCT_CAT_CD is null or T.PRODUCT_CAT_CD not in ('OPT', 'FUT','MLEG')) )
+--      root on execs.trd_date=root.trd_date and execs.glb_root_order_id=root.glb_root_order_id
+--      where execs.LEAF_EXEC_FG = 'Y' and execs.event_type_cd = 'FILLED_CONF'
+--        and execs.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137')
+--        and execs.SYS_ORDER_STAT_CD in ('2','1') and execs.TRD_DATE in ('20160325','20160413' )
+--        and (execs.PRODUCT_CAT_CD is null or execs.PRODUCT_CAT_CD not in ('OPT', 'FUT','MLEG'))
+--        and (execs.exec_price_curr_cd = 'USD' or execs.exec_price_curr_cd = null) )
+--    Aggregated group by TRD_DATE, ROOT_FLOW_CAT order by TRD_DATE );
+
+select distinct FLOW_CLASS from ORDERS_DETAILS;
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index 252bd51ad6..c56c0264d3 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -1987,6 +1987,7 @@ protected void executeSparkJob(Vector jobClassNames, String logFileName) {
         command = snappyJobScript + " --class " + userJob +
             " --master spark://" + masterHost + ":" + masterPort + " " +
             SnappyPrms.getExecutorMemory() + " " +
+            " --jars " + getStoreTestsJar() +
             SnappyPrms.getSparkSubmitExtraPrms() + " " +
             " --conf spark.executor.extraJavaOptions=-XX:+HeapDumpOnOutOfMemoryError" +
             " --conf spark.extraListeners=io.snappydata.hydra.SnappyCustomSparkListener" +
diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ct.bt b/dtests/src/test/java/io/snappydata/hydra/ct/ct.bt
index 8a01457642..39f84437f3 100644
--- a/dtests/src/test/java/io/snappydata/hydra/ct/ct.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ct.bt
@@ -9,116 +9,82 @@ io/snappydata/hydra/ct/ctSplitMode.conf
   redundantCopies=0  persistenceMode="null" //not required for replicated table, but required for hydra config
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="Replicated"
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
 
 io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
-  A=snappyStore snappyStoreHosts=1 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
-  B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=1  locatorVMsPerHost=1 locatorThreadsPerVM=1
-  D=worker workerHosts=1 workerVMsPerHost=1 workerThreadsPerVM=1
-  dataFilesLocation="/export/shared/QA_DATA/ct"
-  redundantCopies=0  persistenceMode="sync" //redundancy not required for replicated table, but required for hydra config
-  tableType="PersistentReplicated"
-  scriptName="ct_create_and_load_persistentReplicated_row_tables.sql"
-  fullResultSetValidation= true
-
-io/snappydata/hydra/ct/ctSplitModeCreateWithScriptLeadHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
+  redundantCopies=0  persistenceMode="sync" //redundancy not required for replicated table, but required for hydra config
   tableType="PersistentReplicated"
   scriptName="ct_create_and_load_persistentReplicated_row_tables.sql"
-  numVMsToStop=1
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
 
 io/snappydata/hydra/ct/ctSplitMode.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1,2  persistenceMode="null"
-  dataFilesLocation="/export/shared/QA_DATA/ct"
-  tableType="PartitionedRow"
-  fullResultSetValidation= true
-
-io/snappydata/hydra/ct/ctSplitModeServerHA.conf
-  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
-  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
-  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1  persistenceMode="sync"
+  redundantCopies=1  persistenceMode="null"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="PartitionedRow"
-  numVMsToStop=1
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
 
 io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1  persistenceMode="sync"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="PersistentPartitionRow"
   scriptName="ct_create_and_load_persistentPartitioned_row_table.sql"
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
 
 io/snappydata/hydra/ct/ctSplitMode.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="null"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="ColocatedRow"
-  fullResultSetValidation= true
-
-io/snappydata/hydra/ct/ctSplitModeCreateWithSparkAppLeadHA.conf
-  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
-  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
-  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1  persistenceMode="null"
-  dataFilesLocation="/export/shared/QA_DATA/ct"
-  tableType="ColocatedRow"
-  numVMsToStop=1
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
 
 io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="null"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="EvictionRow"
   scriptName="ct_create_and_load_eviction_row_table.sql"
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
 
 io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1  persistenceMode="sync"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="PersistentColocatedRow"
   scriptName="ct_create_and_load_persistentColocated_row_table.sql"
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
 
 io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1  persistenceMode="sync"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="ColocatedWithEvictionRow"
   scriptName="ct_create_and_load_colocatedWithEviction_row_table.sql"
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
 
 //Column tables
 io/snappydata/hydra/ct/ctSplitMode.conf
@@ -126,104 +92,47 @@ io/snappydata/hydra/ct/ctSplitMode.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="null"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="Column"
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
 
 io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1,2  persistenceMode="sync"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="PersistentColumn"
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
 
 io/snappydata/hydra/ct/ctSplitMode.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="null"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="ColocatedColumn"
-  fullResultSetValidation= true
-
-io/snappydata/hydra/ct/ctSplitModeCreateWithScriptLeadHA.conf
-  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
-  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
-  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1  persistenceMode="null"
-  dataFilesLocation="/export/shared/QA_DATA/ct"
-  tableType="ColocatedColumn"
-  scriptName="ct_create_and_load_colocated_column_table.sql"
-  numVMsToStop=1
-  fullResultSetValidation= true
-
-io/snappydata/hydra/ct/ctSplitModeCreateWithScriptServerHA.conf
-  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
-  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
-  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1,2  persistenceMode="sync"
-  dataFilesLocation="/export/shared/QA_DATA/ct"
-  tableType="PersistentColumn"
-  numVMsToStop=1
-  scriptName="ct_create_and_load_persistent_column_table.sql"
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
 
 io/snappydata/hydra/ct/ctSplitMode.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1,2  persistenceMode="sync"
-  dataFilesLocation="/export/shared/QA_DATA/ct"
-  tableType="PersistentColocatedColumn"
-  fullResultSetValidation= true
-
-io/snappydata/hydra/ct/ctSplitModeLeadHA.conf
-  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
-  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
-  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1  persistenceMode="sync"
-  dataFilesLocation="/export/shared/QA_DATA/ct"
-  tableType="PersistentColocatedColumn"
-  numVMsToStop=1
-  fullResultSetValidation= true
-
-io/snappydata/hydra/ct/ctSplitModeServerHA.conf
-  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
-  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
-  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1,2  persistenceMode="sync"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="PersistentColocatedColumn"
-  numVMsToStop=1
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
 
 io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,1  persistenceMode="null"
-  dataFilesLocation="/export/shared/QA_DATA/ct"
-  tableType="Column"
-  fullResultSetValidation= true
-
-io/snappydata/hydra/ct/ctSplitModeCreateWithSparkAppServerHA.conf
-  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
-  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
-  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1,2  persistenceMode="null"
+  redundantCopies=1  persistenceMode="null"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="Column"
-  numVMsToStop=1
-  fullResultSetValidation= true
+  numRowsValidation=true fullResultSetValidation=true
diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ctHA.bt b/dtests/src/test/java/io/snappydata/hydra/ct/ctHA.bt
new file mode 100644
index 0000000000..1b79ae7cd1
--- /dev/null
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ctHA.bt
@@ -0,0 +1,90 @@
+io/snappydata/hydra/ct/ctSplitModeCreateWithScriptLeadHA.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0  persistenceMode="sync" //redundantCopies not required in case of replicated table.
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PersistentReplicated"
+  scriptName="ct_create_and_load_persistentReplicated_row_tables.sql"
+  numVMsToStop=1
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeServerHA.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=1  persistenceMode="sync"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PartitionedRow"
+  numVMsToStop=1
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithSparkAppLeadHA.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=1  persistenceMode="null"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="ColocatedRow"
+  numVMsToStop=1
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithScriptLeadHA.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=1  persistenceMode="null"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="ColocatedColumn"
+  scriptName="ct_create_and_load_colocated_column_table.sql"
+  numVMsToStop=1
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithScriptServerHA.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=1  persistenceMode="sync"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PersistentColumn"
+  numVMsToStop=1
+  scriptName="ct_create_and_load_persistent_column_table.sql"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeLeadHA.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=1  persistenceMode="sync"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PersistentColocatedColumn"
+  numVMsToStop=1
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeServerHA.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=1  persistenceMode="sync"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PersistentColocatedColumn"
+  numVMsToStop=1
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithSparkAppServerHA.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=1  persistenceMode="null"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="Column"
+  numVMsToStop=1
+  numRowsValidation=true fullResultSetValidation=true
diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ctMisc.bt b/dtests/src/test/java/io/snappydata/hydra/ct/ctMisc.bt
new file mode 100644
index 0000000000..17355ac3dd
--- /dev/null
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ctMisc.bt
@@ -0,0 +1,169 @@
+io/snappydata/hydra/ct/ctSplitMode.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0,2  persistenceMode="null"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PartitionedRow"
+  numRowsValidation=true  fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0  persistenceMode="sync"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PersistentPartitionRow"
+  scriptName="ct_create_and_load_persistentPartitioned_row_table.sql"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0  persistenceMode="sync"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PersistentPartitionRow"
+  scriptName="ct_create_and_load_persistentPartitioned_row_table.sql"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitMode.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0  persistenceMode="null"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="ColocatedRow"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithSparkAppLeadHA.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0  persistenceMode="null"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="ColocatedRow"
+  numVMsToStop=1
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0  persistenceMode="null"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="EvictionRow"
+  scriptName="ct_create_and_load_eviction_row_table.sql"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0  persistenceMode="sync"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PersistentColocatedRow"
+  scriptName="ct_create_and_load_persistentColocated_row_table.sql"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0  persistenceMode="sync"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="ColocatedWithEvictionRow"
+  scriptName="ct_create_and_load_colocatedWithEviction_row_table.sql"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitMode.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0  persistenceMode="null"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="Column"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0  persistenceMode="sync"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PersistentColumn"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitMode.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0,2  persistenceMode="null"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="ColocatedColumn"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitMode.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0  persistenceMode="sync"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PersistentColocatedColumn"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=0,2  persistenceMode="null"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="Column"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithScriptServerHA.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=2  persistenceMode="sync"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PersistentColumn"
+  numVMsToStop=1
+  scriptName="ct_create_and_load_persistent_column_table.sql"
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeServerHA.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=2  persistenceMode="sync"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="PersistentColocatedColumn"
+  numVMsToStop=1
+  numRowsValidation=true fullResultSetValidation=true
+
+io/snappydata/hydra/ct/ctSplitModeCreateWithSparkAppServerHA.conf
+  A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
+  redundantCopies=2  persistenceMode="null"
+  dataFilesLocation="/export/shared/QA_DATA/ct"
+  tableType="Column"
+  numVMsToStop=1
+  numRowsValidation=true fullResultSetValidation=true
\ No newline at end of file
diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitMode.conf b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitMode.conf
index 8babc79889..b577622ba5 100644
--- a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitMode.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitMode.conf
@@ -15,7 +15,7 @@ INITTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  =
 
 TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
             io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.hydra.ct.ValidateCTQueriesJob
-            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer = "dataFilesLocation=${dataFilesLocation},tableType=${tableType},fullResultSetValidation=${fullResultSetValidation}"
+            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer = "dataFilesLocation=${dataFilesLocation},tableType=${tableType},fullResultSetValidation=${fullResultSetValidation},numRowsValidation=${numRowsValidation}"
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
             maxThreads = 1
             threadGroups = leadThreads;
@@ -29,7 +29,7 @@ TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = H
 
 TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSparkJob
             io.snappydata.hydra.cluster.SnappyPrms-sparkJobClassNames = io.snappydata.hydra.ct.ValidateCTQueriesApp
-            io.snappydata.hydra.cluster.SnappyPrms-userAppArgs = "${dataFilesLocation} ${tableType} ${fullResultSetValidation}"
+            io.snappydata.hydra.cluster.SnappyPrms-userAppArgs = "${dataFilesLocation} ${tableType} ${fullResultSetValidation} ${numRowsValidation}"
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
             io.snappydata.hydra.cluster.SnappyPrms-executorMemory = 4g
             maxThreads = 1
diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
index cc573010b2..dc1fe7f610 100644
--- a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
@@ -16,7 +16,7 @@ INITTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  =
 
 TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
             io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.hydra.ct.ValidateCTQueriesJob
-            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer = "dataFilesLocation=${dataFilesLocation},tableType=${tableType},fullResultSetValidation=${fullResultSetValidation}"
+            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer = "dataFilesLocation=${dataFilesLocation},tableType=${tableType},fullResultSetValidation=${fullResultSetValidation},numRowsValidation=${numRowsValidation}"
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
             maxThreads = 1
             threadGroups = leadThreads;
@@ -30,7 +30,7 @@ TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = H
 
 TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSparkJob
             io.snappydata.hydra.cluster.SnappyPrms-sparkJobClassNames = io.snappydata.hydra.ct.ValidateCTQueriesApp
-            io.snappydata.hydra.cluster.SnappyPrms-userAppArgs = "${dataFilesLocation} ${tableType} ${fullResultSetValidation}"
+            io.snappydata.hydra.cluster.SnappyPrms-userAppArgs = "${dataFilesLocation} ${tableType} ${fullResultSetValidation} ${numRowsValidation}"
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
             io.snappydata.hydra.cluster.SnappyPrms-executorMemory = 4g
             maxThreads = 1
diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf
index bd1272e917..77ae4ed3cd 100644
--- a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf
@@ -15,7 +15,7 @@ INITTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod = H
 
 TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
             io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.hydra.ct.ValidateCTQueriesJob
-            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer = "dataFilesLocation=${dataFilesLocation},tableType=${tableType},fullResultSetValidation=${fullResultSetValidation}"
+            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer = "dataFilesLocation=${dataFilesLocation},tableType=${tableType},fullResultSetValidation=${fullResultSetValidation},numRowsValidation=${numRowsValidation}"
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
             maxThreads = 1
             threadGroups = leadThreads;
@@ -29,7 +29,7 @@ TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = H
 
 TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSparkJob
             io.snappydata.hydra.cluster.SnappyPrms-sparkJobClassNames = io.snappydata.hydra.ct.ValidateCTQueriesApp
-            io.snappydata.hydra.cluster.SnappyPrms-userAppArgs = "${dataFilesLocation} ${tableType} ${fullResultSetValidation}"
+            io.snappydata.hydra.cluster.SnappyPrms-userAppArgs = "${dataFilesLocation} ${tableType} ${fullResultSetValidation} ${numRowsValidation}"
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
             io.snappydata.hydra.cluster.SnappyPrms-executorMemory = 4g
             maxThreads = 1
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index d345a24373..dcb4594236 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -25,11 +25,49 @@ import scala.io.Source
 
 
 object SnappyTestUtils {
+
+  var validateFullResultSet: Boolean = false;
+  var numRowsValidation: Boolean = false;
+  var tableType: String = null
+
+  /*
+  Executes the join query and asserts that the resultset row count matches the expected
+  number of rows; the assertion fails on a mismatch.
+  */
+  def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
+       pw: PrintWriter): Any = {
+    snc.sql("set spark.sql.crossJoin.enabled = true")
+    assertQuery(snc, sqlString, numRows, queryNum, pw )
+  }
+
+
+  /*
+ Executes the query and asserts that the resultset row count matches the expected
+ number of rows; the assertion fails on a mismatch.
+ */
+  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
+      pw: PrintWriter): Any = {
+    val df = snc.sql(sqlString)
+    val count = df.count()
+    // scalastyle:off println
+    println(s"Query $queryNum")
+    df.explain(true)
+    pw.println(s"Query ${queryNum} \n df.count is : ${count} \n Expected numRows : ${numRows} " +
+        s"\n Table Type : ${tableType}")
+    println(s"Query ${queryNum} \n df.count is : ${count} \n Expected numRows : ${numRows} \n " +
+        s"Table Type : ${tableType}")
+    // scalastyle:on println
+    assert(df.count() == numRows,
+      s"Mismatch got for query ${queryNum} : df.count -> ${count} but expected numRows " +
+          s"-> $numRows for query = $sqlString Table Type : $tableType")
+    pw.flush()
+  }
+
   def assertJoinFullResultSet(snc: SnappyContext, sqlString: String, queryNum: String,
-                              tableType: String, pw: PrintWriter, sqlContext: SQLContext): Any = {
+                              pw: PrintWriter, sqlContext: SQLContext): Boolean = {
     snc.sql("set spark.sql.crossJoin.enabled = true")
     sqlContext.sql("set spark.sql.crossJoin.enabled = true")
-    assertQueryFullResultSet(snc, sqlString, queryNum, tableType, pw, sqlContext)
+    assertQueryFullResultSet(snc, sqlString, queryNum, pw, sqlContext)
   }
 
   def dataTypeConverter(row: Row): Row = {
@@ -47,6 +85,9 @@ object SnappyTestUtils {
     Row.fromSeq(md)
   }
 
+  /*
+   Writes the query resultset to a csv file.
+ */
   def writeToFile(df: DataFrame, dest: String, snc: SnappyContext): Unit = {
     import snc.implicits._
     df.map(dataTypeConverter)(RowEncoder(df.schema))
@@ -65,7 +106,11 @@ object SnappyTestUtils {
       "header", false).save(dest)
   }
 
-  def getTempDir(dirName: String): String = {
+  /*
+  Returns the path of the directory where the query resultsets are saved.
+  Creates the directory if it does not already exist.
+   */
+  def getQueryResultDir(dirName: String): String = {
     val log: File = new File(".")
     var dest: String = null
     val dirString = log.getCanonicalPath;
@@ -73,69 +118,179 @@ object SnappyTestUtils {
       val logDir = log.listFiles.filter(_.getName.equals("snappyleader.log"))
       if (!logDir.isEmpty) {
         val leaderLogFile: File = logDir.iterator.next()
-        if (leaderLogFile.exists()) dest = dirString + File.separator + ".." + File.separator +
-            ".." + File.separator + dirName
+        if (leaderLogFile.exists()) {
+          dest = dirString + File.separator + ".." + File.separator + ".." + File.separator +
+              dirName
+        }
       }
       else dest = dirString + File.separator + ".." + File.separator + dirName
     }
     else dest = log.getCanonicalPath + File.separator + dirName
-    val tempDir: File = new File(dest)
-    if (!tempDir.exists) tempDir.mkdir()
-    return tempDir.getAbsolutePath
+    val queryResultDir: File = new File(dest)
+    if (!queryResultDir.exists) {
+      queryResultDir.mkdir()
+    }
+    return queryResultDir.getAbsolutePath
   }
 
+  /*
+   Performs full resultSet validation for a select query by comparing the snappy
+   resultset with the spark resultset.
+ */
   def assertQueryFullResultSet(snc: SnappyContext, sqlString: String, queryNum: String,
-                               tableType: String, pw: PrintWriter, sqlContext: SQLContext): Any = {
+                               pw: PrintWriter, sqlContext: SQLContext): Boolean = {
+    var hasValidationFailed = false
+
+    val snappyQueryFileName = s"Snappy_${queryNum}"
+    val snappyDest: String = SnappyTestUtils.getQueryResultDir("snappyQueryFiles") +
+        File.separator + snappyQueryFileName
+    // scalastyle:off println
+    pw.println(snappyDest)
+    val snappyFile: File = new java.io.File(snappyDest)
     var snappyDF = snc.sql(sqlString)
-    var sparkDF = sqlContext.sql(sqlString);
-    val snappyQueryFileName = s"Snappy_${queryNum}.out"
-    val sparkQueryFileName = s"Spark_${queryNum}.out"
-    val snappyDest: String = getTempDir("snappyQueryFiles_" + tableType) + File.separator +
-        snappyQueryFileName
-    val sparkDest: String = getTempDir("sparkQueryFiles") + File.separator + sparkQueryFileName
+
+    val sparkQueryFileName = s"Spark_${queryNum}"
+    val sparkDest: String = SnappyTestUtils.getQueryResultDir("sparkQueryFiles") + File.separator +
+        sparkQueryFileName
+    pw.println(sparkDest)
     val sparkFile: File = new java.io.File(sparkDest)
-    val snappyFile = new java.io.File(snappyDest)
-    val col1 = sparkDF.schema.fieldNames(0)
-    val col = sparkDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
-    if (snappyFile.listFiles() == null) {
-      snappyDF = snappyDF.repartition(1).sortWithinPartitions(col1, col: _*)
-      writeToFile(snappyDF, snappyDest, snc)
-      // scalastyle:off println
-      pw.println(s"${queryNum} Result Collected in file $snappyDest")
-    }
-    if (sparkFile.listFiles() == null) {
-      sparkDF = sparkDF.repartition(1).sortWithinPartitions(col1, col: _*)
-      writeToFile(sparkDF, sparkDest, snc)
-      pw.println(s"${queryNum} Result Collected in file $sparkDest")
+    var sparkDF = sqlContext.sql(sqlString)
+
+    try {
+      if (!snappyFile.exists()) {
+        val snap_col1 = snappyDF.schema.fieldNames(0)
+        val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
+        snappyDF = snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
+        writeToFile(snappyDF, snappyDest, snc)
+        // writeResultSetToCsv(snappyDF, snappyFile)
+        pw.println(s"${queryNum} Result Collected in file ${snappyDest}")
+      }
+      if (!sparkFile.exists()) {
+        val col1 = sparkDF.schema.fieldNames(0)
+        val col = sparkDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
+        sparkDF = sparkDF.repartition(1).sortWithinPartitions(col1, col: _*)
+        writeToFile(sparkDF, sparkDest, snc)
+        // writeResultSetToCsv(sparkDF, sparkFile)
+        pw.println(s"${queryNum} Result Collected in file ${sparkDest}")
+      }
+      hasValidationFailed = compareFiles(snappyFile, sparkFile, pw, hasValidationFailed)
+    } catch {
+      case ex: Exception => {
+        hasValidationFailed = true
+        pw.println(s"Full resultSet Validation failed for ${queryNum} with following exception:\n")
+        ex.printStackTrace(pw)
+      }
     }
+    pw.flush()
+    return hasValidationFailed
+  }
+
+  def compareFiles(snappyFile: File, sparkFile: File, pw: PrintWriter, validationFailed: Boolean):
+  Boolean = {
+    var hasValidationFailed = validationFailed
     val expectedFile = sparkFile.listFiles.filter(_.getName.endsWith(".csv"))
     val actualFile = snappyFile.listFiles.filter(_.getName.endsWith(".csv"))
     val expectedLineSet = Source.fromFile(expectedFile.iterator.next()).getLines()
-    val actualLineSet = Source.fromFile(actualFile.iterator.next()).getLines
-    // var numLines = 0
+    val actualLineSet = Source.fromFile(actualFile.iterator.next()).getLines()
+
     while (expectedLineSet.hasNext && actualLineSet.hasNext) {
       val expectedLine = expectedLineSet.next()
       val actualLine = actualLineSet.next()
       if (!actualLine.equals(expectedLine)) {
-        pw.println(s"\n** For ${queryNum} result mismatch observed**")
-        pw.println(s"\nExpected Result:\n $expectedLine")
-        pw.println(s"\nActual Result:\n $actualLine")
-        pw.println(s"\nQuery =" + sqlString + " Table Type : " + tableType)
-        /* assert(assertion = false, s"\n** For $queryNum result mismatch observed** \n" +
-            s"Expected Result \n: $expectedLine \n" +
-            s"Actual Result   \n: $actualLine \n" +
-            s"Query =" + sqlString + " Table Type : " + tableType) */
-        // Commented due to Q37 failure by just the difference of 0.1 in actual and expected value
+        hasValidationFailed = true
+        pw.println(s"Expected Result : $expectedLine")
+        pw.println(s"Actual Result   : $actualLine")
       }
-      // numLines += 1
     }
     if (actualLineSet.hasNext || expectedLineSet.hasNext) {
-      pw.println(s"\nFor ${queryNum} result count mismatch observed")
-      assert(assertion = false, s"\nFor $queryNum result count mismatch observed")
+      hasValidationFailed = true
+      if (actualLineSet.hasNext) {
+        pw.println(s"Following ${actualLineSet.size} rows are unexpected in Snappy:")
+      }
+      while (actualLineSet.hasNext)
+        pw.println(actualLineSet.next())
+      if (expectedLineSet.hasNext) {
+        pw.println(s"Following ${expectedLineSet.size} rows are missing in Snappy:")
+      }
+      while (expectedLineSet.hasNext)
+        pw.println(expectedLineSet.next())
     }
-    // scalastyle:on println
-    pw.flush()
+    hasValidationFailed
+  }
+
+  /*
+    Appends the query number of a failed query to the comma-separated failedQueries string.
+   */
+  def addToFailedQueryList(failedQueries: String, queryNum: String): String = {
+    var str = failedQueries
+    if (str.isEmpty) {
+      str = queryNum
+    }
+    else {
+      str = str + "," + queryNum
+    }
+    return str
   }
 
+  /*
+   Performs full resultSet validation from snappy for a select query against results in a
+   goldenFile.
+ */
+  def assertValidateFullResultSetFromGoldenFile(sqlString: String, queryNum: String, tableType:
+  String, snc: SnappyContext, pw: PrintWriter, validationFailed: Boolean, goldenFileDest: String):
+  Boolean = {
+    var hasValidationFailed = validationFailed
+
+    val snappyQueryFileName = s"Snappy_${queryNum}"
+    val snappyDest: String = SnappyTestUtils.getQueryResultDir("snappyQueryFiles") +
+        File.separator + snappyQueryFileName
+    pw.println(snappyDest)
+    val snappyFile: File = new java.io.File(snappyDest)
+    var snappyDF = snc.sql(sqlString)
 
+    pw.println(goldenFileDest)
+    val goldenFileName = goldenFileDest + File.separator + s"Spark_$queryNum"
+    val sortedGoldenDest = goldenFileDest + File.separator + s"Sorted_$queryNum"
+    val sortedGoldenFile: File = new java.io.File(sortedGoldenDest)
+    val goldenFile: File = new java.io.File(goldenFileName)
+
+    try {
+      if (!snappyFile.exists()) {
+        val snap_col1 = snappyDF.schema.fieldNames(0)
+        val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
+        snappyDF = snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
+        SnappyTestUtils.writeToFile(snappyDF, snappyDest, snc)
+        // writeResultSetToCsv(snappyDF, snappyFile)
+        pw.println(s"${queryNum} Result Collected in file $snappyDest")
+      }
+      if (!goldenFile.exists()) {
+        pw.println(s"Did not find any golden file for query $queryNum")
+        throw new Exception(s"Did not find any golden file for query $queryNum")
+      } else if (goldenFile.length() > 0) {
+        // sort the contents of golden file before comparing results
+        var goldenDF = snc.read.format("com.databricks.spark.csv")
+            .option("header", "false").option("inferSchema", "true").option("nullValue", "NULL")
+            .load(goldenFileName)
+        val col1 = goldenDF.schema.fieldNames(0)
+        val col = goldenDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
+        goldenDF = goldenDF.repartition(1).sortWithinPartitions(col1, col: _*)
+        SnappyTestUtils.writeToFile(goldenDF, sortedGoldenDest, snc)
+        // writeResultSetToCsv(goldenDF, sortedGoldenFile)
+        pw.println(s"${queryNum} Result Collected in file ${sortedGoldenDest}")
+      } else {
+        pw.println(s"zero results in query $queryNum.")
+      }
+      hasValidationFailed = SnappyTestUtils.compareFiles(snappyFile, sortedGoldenFile, pw,
+        hasValidationFailed)
+
+    } catch {
+      case ex: Exception => {
+        hasValidationFailed = true
+        pw.println(s"Full resultSet Validation failed for ${queryNum} with following exception:\n")
+        ex.printStackTrace(pw)
+      }
+    }
+    pw.flush()
+    return hasValidationFailed
+  }
 }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/TestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/TestUtil.scala
deleted file mode 100644
index aaedbf4791..0000000000
--- a/dtests/src/test/scala/io/snappydata/hydra/TestUtil.scala
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * Copyright (c) 2016 SnappyData, Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you
- * may not use this file except in compliance with the License. You
- * may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License. See accompanying
- * LICENSE file.
- */
-
-package io.snappydata.hydra
-
-import java.io.{File, PrintWriter}
-
-import scala.io.Source
-
-import org.apache.spark.sql._
-import org.apache.spark.sql.catalyst.encoders.RowEncoder
-
-object TestUtil {
-  var validateFullResultSet: Boolean = false;
-  var tableType: String = null
-
-  /*
-  Executes the join query, matches the result with expected result, returns false if the query
-  validation has failed.
-  */
-  def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
-  PrintWriter,sqlContext: SQLContext): Boolean = {
-    var hasValidationFailed = false
-    snc.sql("set spark.sql.crossJoin.enabled = true")
-    val df = snc.sql(sqlString)
-    pw.println(s"No. rows in resultset for join query ${queryNum} is : ${df.count} for ${tableType} table")
-    if (df.count() != numRows) {
-      pw.println(s"Result mismatch for join query ${queryNum} : found ${df.count} rows but expected ${numRows} rows.")
-      hasValidationFailed = true
-    }
-    pw.flush()
-    if (validateFullResultSet)
-      hasValidationFailed = assertValidateFullResultSet(snc, sqlString, queryNum, pw, sqlContext,
-        hasValidationFailed)
-
-    if (hasValidationFailed)
-      pw.println(s"Failed Query =" + sqlString + " Table Type : " + tableType + "\n")
-    pw.flush()
-    return hasValidationFailed
-  }
-
-  /*
-   Executes the query, matches the result with expected result, returns false if the query
-   validation has failed.
-   */
-  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
-  PrintWriter, sqlContext: SQLContext): Boolean = {
-    var hasValidationFailed = false
-    val df = snc.sql(sqlString)
-    pw.println(s"No. rows in resultset for query ${queryNum} is : ${df.count} for ${tableType} table")
-    if (df.count() != numRows) {
-      pw.println(s"Result mismatch for query ${queryNum} : found ${df.count} rows but expected ${numRows} rows.")
-      hasValidationFailed = true
-    }
-    pw.flush()
-    if (validateFullResultSet)
-      hasValidationFailed = assertValidateFullResultSet(snc, sqlString, queryNum, pw, sqlContext,
-        hasValidationFailed)
-
-    if (hasValidationFailed)
-      pw.println(s"Failed Query : " + sqlString + "\n Table Type : " + tableType + "\n")
-    pw.flush()
-    return hasValidationFailed
-  }
-
-  /*
-  Performs full resultSet validation for snappy results for the query against snappy spark
-  resultset.
-   */
-  def assertValidateFullResultSet(snc: SnappyContext, sqlString: String, queryNum: String, pw:
-  PrintWriter,sqlContext: SQLContext, validationFailed: Boolean): Boolean = {
-    var hasValidationFailed = validationFailed
-
-    val snappyQueryFileName = s"Snappy_${queryNum}"
-    val snappyDest: String = SnappyTestUtils.getTempDir("snappyQueryFiles") + File.separator +
-        snappyQueryFileName
-    pw.println(snappyDest)
-    val snappyFile: File = new java.io.File(snappyDest)
-    var snappyDF = snc.sql(sqlString)
-
-    val sparkQueryFileName = s"Spark_${queryNum}"
-    val sparkDest: String = SnappyTestUtils.getTempDir("sparkQueryFiles") + File.separator + sparkQueryFileName
-    pw.println(sparkDest)
-    val sparkFile: File = new java.io.File(sparkDest)
-    var sparkDF = sqlContext.sql(sqlString)
-
-    try {
-      if (!snappyFile.exists()) {
-        val snap_col1 = snappyDF.schema.fieldNames(0)
-        val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
-        snappyDF = snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
-        SnappyTestUtils.writeToFile(snappyDF,snappyDest,snc)
-        //writeResultSetToCsv(snappyDF, snappyFile)
-        pw.println(s"${queryNum} Result Collected in file ${snappyDest}")
-      }
-      if (!sparkFile.exists()) {
-        val col1 = sparkDF.schema.fieldNames(0)
-        val col = sparkDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
-        sparkDF = sparkDF.repartition(1).sortWithinPartitions(col1, col: _*)
-        SnappyTestUtils.writeToFile(sparkDF,sparkDest,snc)
-        //writeResultSetToCsv(sparkDF, sparkFile)
-        pw.println(s"${queryNum} Result Collected in file ${sparkDest}")
-      }
-      hasValidationFailed = compareFiles(snappyFile,sparkFile,pw,hasValidationFailed)
-    } catch {
-      case ex: Exception => {
-        hasValidationFailed = true
-        pw.println(s"Full resultSet Validation failed for ${queryNum} with following exception:\n")
-        ex.printStackTrace(pw)
-      }
-    }
-    pw.flush()
-    return hasValidationFailed
-  }
-
-  /*
-  Writes the query resultset to a csv file.
-   */
-  def writeResultSetToCsv(df: DataFrame, destFile: File): Unit = {
-    val parent = destFile.getParentFile
-    if (!parent.exists()) {
-      parent.mkdirs()
-    }
-    implicit val encoder = RowEncoder(df.schema)
-    df.mapPartitions(rows => {
-      val sb: StringBuilder = new StringBuilder()
-      val pw = new PrintWriter(destFile)
-      try {
-        rows.foreach { row =>
-          row.toSeq.foreach {
-            case d: Double =>
-              // round to one decimal digit
-              sb.append(math.floor(d * 10.0 + 0.5) / 10.0).append(',')
-            case bd: java.math.BigDecimal =>
-              sb.append(bd.setScale(2, java.math.RoundingMode.HALF_UP)).append(',')
-            case v => sb.append(v).append(',')
-          }
-          val len = sb.length
-          if (len > 0) sb.setLength(len - 1)
-          sb.append('\n')
-          if (sb.length >= 1048576) {
-            pw.append(sb)
-            pw.flush()
-            sb.clear()
-          }
-        }
-        if (sb.nonEmpty) {
-          pw.append(sb)
-          pw.flush()
-        }
-      }
-      finally {
-        pw.close()
-      }
-      Iterator.empty
-    }).collect()
-  }
-
-  /*
-  Returns the path for the directory where the output of resultset of queries have been saved.
-  Creates a new directory, if not already existing
-   */
-  def getQueryResultDir(dirName: String): String = {
-    val log: File = new File(".")
-    var dest: String = null
-    val dirString = log.getCanonicalPath;
-    if (dirName.equals("sparkQueryFiles")) {
-      val logDir = log.listFiles.filter(_.getName.equals("snappyleader.log"))
-      if (!logDir.isEmpty) {
-        val leaderLogFile: File = logDir.iterator.next()
-        if (leaderLogFile.exists())
-          dest = dirString + File.separator + ".." + File.separator + ".." + File.separator + dirName
-      }
-      else dest = dirString + File.separator + ".." + File.separator + dirName
-    }
-    else dest = log.getCanonicalPath + File.separator + dirName
-    val queryResultDir: File = new File(dest)
-    if (!queryResultDir.exists)
-      queryResultDir.mkdir()
-    return queryResultDir.getAbsolutePath
-  }
-
-  /*
-  Performs full resultSet validation from snappy for a select query against results in a goldenFile.
-   */
-  def assertValidateFullResultSetFromGoldenFile(sqlString: String, queryNum: String, tableType:
-  String, snc: SnappyContext, pw: PrintWriter, validationFailed: Boolean, goldenFileDest: String):
-  Boolean = {
-    var hasValidationFailed = validationFailed
-
-    val snappyQueryFileName = s"Snappy_${queryNum}"
-    val snappyDest: String = SnappyTestUtils.getTempDir("snappyQueryFiles") + File.separator + snappyQueryFileName
-    pw.println(snappyDest)
-    val snappyFile: File = new java.io.File(snappyDest)
-    var snappyDF = snc.sql(sqlString)
-
-    pw.println(goldenFileDest)
-    val goldenFileName = goldenFileDest + File.separator + s"Spark_$queryNum"
-    val sortedGoldenDest = goldenFileDest + File.separator + s"Sorted_$queryNum"
-    val sortedGoldenFile: File = new java.io.File(sortedGoldenDest)
-    val goldenFile: File = new java.io.File(goldenFileName)
-
-    try {
-      if (!snappyFile.exists()) {
-        val snap_col1 = snappyDF.schema.fieldNames(0)
-        val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
-        snappyDF = snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
-        SnappyTestUtils.writeToFile(snappyDF,snappyDest,snc)
-        //writeResultSetToCsv(snappyDF, snappyFile)
-        pw.println(s"${queryNum} Result Collected in file $snappyDest")
-      }
-      if (!goldenFile.exists()) {
-        pw.println(s"Did not find any golden file for query $queryNum")
-        throw new Exception(s"Did not find any golden file for query $queryNum")
-      } else if (goldenFile.length() > 0) {
-        // sort the contents of golden file before comparing results
-        var goldenDF = snc.read.format("com.databricks.spark.csv")
-            .option("header", "false").option("inferSchema", "true").option("nullValue", "NULL")
-            .load(goldenFileName)
-        val col1 = goldenDF.schema.fieldNames(0)
-        val col = goldenDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
-        goldenDF = goldenDF.repartition(1).sortWithinPartitions(col1, col: _*)
-        SnappyTestUtils.writeToFile(goldenDF,sortedGoldenDest,snc)
-        //writeResultSetToCsv(goldenDF, sortedGoldenFile)
-        pw.println(s"${queryNum} Result Collected in file ${sortedGoldenDest}")
-      } else {
-        pw.println(s"zero results in query $queryNum.")
-      }
-      hasValidationFailed = compareFiles(snappyFile,sortedGoldenFile,pw,hasValidationFailed)
-
-    } catch {
-      case ex: Exception => {
-        hasValidationFailed = true
-        pw.println(s"Full resultSet Validation failed for ${queryNum} with following exception:\n")
-        ex.printStackTrace(pw)
-      }
-    }
-    pw.flush()
-    return hasValidationFailed
-  }
-
-  def compareFiles(snappyFile: File, sparkFile: File,pw: PrintWriter,validationFailed: Boolean):
-  Boolean = {
-    var hasValidationFailed = validationFailed
-    val expectedFile = sparkFile.listFiles.filter(_.getName.endsWith(".csv"))
-    val actualFile = snappyFile.listFiles.filter(_.getName.endsWith(".csv"))
-    val expectedLineSet = Source.fromFile(expectedFile.iterator.next()).getLines()
-    val actualLineSet = Source.fromFile(actualFile.iterator.next()).getLines()
-
-    while (expectedLineSet.hasNext && actualLineSet.hasNext) {
-      val expectedLine = expectedLineSet.next()
-      val actualLine = actualLineSet.next()
-      if (!actualLine.equals(expectedLine)) {
-        hasValidationFailed = true
-        pw.println(s"Expected Result : $expectedLine")
-        pw.println(s"Actual Result   : $actualLine")
-      }
-    }
-    if (actualLineSet.hasNext || expectedLineSet.hasNext) {
-      hasValidationFailed = true
-      if (actualLineSet.hasNext)
-        pw.println(s"Following ${actualLineSet.size} rows are unexpected in Snappy:")
-      while (actualLineSet.hasNext)
-        pw.println(actualLineSet.next())
-      if (expectedLineSet.hasNext)
-        pw.println(s"Following ${expectedLineSet.size} rows are missing in Snappy:")
-      while (expectedLineSet.hasNext)
-        pw.println(expectedLineSet.next())
-    }
-    hasValidationFailed
-  }
-
-  /*
-    If validation has failed for a query, add the query number to failedQueries String
-   */
-  def addToFailedQueryList(failedQueries: String, queryNum: String): String = {
-    var str = failedQueries
-    if (str.isEmpty)
-      str = queryNum
-    else
-      str = str + "," + queryNum
-    return str
-  }
-
-}
diff --git a/dtests/src/test/scala/io/snappydata/hydra/concurrency/ConcTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/concurrency/ConcTestUtils.scala
index 84778b3b0b..d59e2a25ea 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/concurrency/ConcTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/concurrency/ConcTestUtils.scala
@@ -28,15 +28,15 @@ object ConcTestUtils {
     for (q <- NWQueries.queries) {
       q._1 match {
         case "Q37" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q37, "Q37",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q55" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q55, "Q55",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q36" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q36, "Q36",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q56" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q56, "Q56",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q38" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q38, "Q38",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         // scalastyle:off println
         case _ => println("OK")
         // scalastyle:on println
@@ -49,109 +49,109 @@ object ConcTestUtils {
     for (q <- NWPLQueries.queries) {
       q._1 match {
         case "Q1" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q1, "Q1",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q2" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q2, "Q2",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q3" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q3, "Q3",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q4" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q4, "Q4",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q5" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q5, "Q5",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q6" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q6, "Q6",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q7" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q7, "Q7",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q8" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q8, "Q8",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q9" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q9, "Q9",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q10" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q5, "Q10",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q11" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q11, "Q11",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q12" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q12, "Q12",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q13" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q13, "Q13",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q14" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q14, "Q14",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q15" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q15, "Q15",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q16" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q16, "Q16",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q17" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q17, "Q17",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q18" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q18, "Q18",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q19" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q19, "Q19",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q20" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q20, "Q20",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q21" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q21, "Q21",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q22" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q22, "Q22",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q23" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q23, "Q23",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q24" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q24, "Q24",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q25" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q25, "Q25",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q26" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q26, "Q28",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q27" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q27, "Q27",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q28" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q28, "Q28",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q29" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q29, "Q29",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q30" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q30, "Q30",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q31" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q31, "Q31",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q32" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q32, "Q32",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q33" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q33, "Q33",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q34" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q34, "Q34",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q35" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q35, "Q35",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q36" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q36, "Q36",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q37" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q37, "Q37",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q38" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q38, "Q38",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q39" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q39, "Q39",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q40" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q40, "Q40",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q41" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q41, "Q41",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q42" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q42, "Q42",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q43" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q43, "Q43",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q44" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q44, "Q44",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q45" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q45, "Q45",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q46" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q46, "Q46",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q47" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q47, "Q47",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q48" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q48, "Q48",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q49" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q49, "Q49",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q50" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q50, "Q50",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q51" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q51, "Q51",
-          tableType, pw, sqlContext)
+          pw, sqlContext)
         /* case "Q52" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q52, "Q52",
-          tableType, pw, sqlContext) */
+          pw, sqlContext) */
         // scalastyle:off println
         case _ => println("OK")
         // scalastyle:on println
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/CTQueries.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/CTQueries.scala
index f6f1a0cb78..7713b545b0 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/CTQueries.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/CTQueries.scala
@@ -40,17 +40,20 @@ object CTQueries {
 
   val query8: String = "select SUM(TOTAL_EXECUTED_QTY),SRC_SYS from ORDERS_DETAILS GROUP BY SRC_SYS"
 
-  val query9: String = "select SUM(TOTAL_EXECUTED_QTY),MIN(TOTAL_EXECUTED_QTY),MAX(TOTAL_EXECUTED_QTY),SRC_SYS " +
+  val query9: String = "select SUM(TOTAL_EXECUTED_QTY),MIN(TOTAL_EXECUTED_QTY)," +
+      "MAX(TOTAL_EXECUTED_QTY),SRC_SYS " +
       "from ORDERS_DETAILS WHERE SRC_SYS='APFF' GROUP BY SRC_SYS"
 
   val query10: String = "select count(*) from ORDERS_DETAILS where Src_sys='OATC'"
 
-  val query11: String = "select '5-CTFIX_ORDER' as SrcFl, a.* from ORDERS_DETAILS a , ORDERS_DETAILS b " +
-      "where a.glb_root_order_id = b.glb_root_order_id and a.trd_date >='20160413' and b.trd_date >='20160413' " +
+  val query11: String = "select '5-CTFIX_ORDER' as SrcFl, a.* " +
+      "from ORDERS_DETAILS a , ORDERS_DETAILS b " +
+      "where a.glb_root_order_id = b.glb_root_order_id " +
+      "and a.trd_date >='20160413' and b.trd_date >='20160413' " +
       "and b.src_sys ='CRIO' order by a.glb_root_order_id, a.trd_datE"
 
-  val query12: String = "select '4-CTFIX_ORDER' as SrcFl, a.glb_root_order_id, a.src_sys, count(*) " +
-      "from ORDERS_DETAILS a , ORDERS_DETAILS b " +
+  val query12: String = "select '4-CTFIX_ORDER' as SrcFl, a.glb_root_order_id, a.src_sys, " +
+      "count(*) from ORDERS_DETAILS a , ORDERS_DETAILS b " +
       "where a.glb_root_order_id = b.glb_root_order_id and a.trd_date ='20160413' " +
       "and b.trd_date ='20160413' and b.src_sys ='CRIO' " +
       "group by a.glb_root_order_id, a.src_sys order by a.glb_root_order_id, a.src_sys"
@@ -63,13 +66,15 @@ object CTQueries {
 
   val query15: String = "select '5-CTFIX_ORDER' as SrcFl, * from ORDERS_DETAILS " +
       "where trd_date>='20160413' and glb_root_order_id in " +
-      "( select glb_root_order_id from ORDERS_DETAILS where trd_date>='20160413' and src_sys='CRIO' ) " +
+      "( select glb_root_order_id from ORDERS_DETAILS where trd_date>='20160413' " +
+      "and src_sys='CRIO' ) " +
       "order by glb_root_order_id, trd_datE"
 
   val query16: String = "select '4-CTFIX_ORDER' as SrcFl, glb_root_order_id, src_sys, count(*) " +
       "from ORDERS_DETAILS " +
       "where trd_date='20160413' and glb_root_order_id in " +
-      "( select glb_root_order_id from ORDERS_DETAILS where trd_date='20160413' and src_sys='CRIO') " +
+      "( select glb_root_order_id from ORDERS_DETAILS where trd_date='20160413' " +
+      "and src_sys='CRIO') " +
       "group by glb_root_order_id, src_sys order by glb_root_order_id, src_sys"
 
   val query17: String = "select Event_type_cd, count(1) from ORDERS_DETAILS " +
@@ -82,73 +87,90 @@ object CTQueries {
       "WHERE ESOD.TRD_DATE = '20160413' AND ESOD.EVENT_TYPE_CD = 'NEW_CONF' " +
       "AND ESOD.EXEC_INSTR like '%A%' GROUP BY ESOD.EXEC_INSTR"
 
-  val query20: String = "select EVENT_RCV_TS, EVENT_TS, src_sys,glb_root_src_sys_id,glb_root_order_id, " +
-      "ticker_symbol,SIDE,order_qty,EVENT_TYPE_CD,product_cat_cd,cntry_cd " +
+  val query20: String = "select EVENT_RCV_TS, EVENT_TS, src_sys, glb_root_src_sys_id, " +
+      "glb_root_order_id, ticker_symbol,SIDE,order_qty,EVENT_TYPE_CD,product_cat_cd,cntry_cd " +
       "from ORDERS_DETAILS " +
-      "where trd_date > '20160212' and src_sys='CAIQS' and event_ts not like '%.%' order by EVENT_RCV_TS limit 100 "
+      "where trd_date > '20160212' and src_sys='CAIQS' " +
+      "and event_ts not like '%.%' order by EVENT_RCV_TS limit 100 "
 
 
   val query21: String = "select event_type_cd,event_rcv_ts,event_ts,sent_ts " +
       "from ORDERS_DETAILS " +
-      "where trd_date='20160413' and glb_root_order_id='15344x8c7' and sys_order_id='20151210.92597'"
+      "where trd_date='20160413' and glb_root_order_id='15344x8c7' " +
+      "and sys_order_id='20151210.92597'"
 
-  val query22: String = "select count(*) from EXEC_DETAILS a LEFT JOIN ORDERS_DETAILS b using (sys_root_order_id)"
+  val query22: String = "select count(*) from EXEC_DETAILS a " +
+      "LEFT JOIN ORDERS_DETAILS b using (sys_root_order_id)"
 
-  val query23: String = "(select TRD_DATE, ROOT_FLOW_CAT, sum(Notional) as notional, count(*) as trades, sum(shares) as shares " +
+  val query23: String = "(select TRD_DATE, ROOT_FLOW_CAT, sum(Notional) as notional, " +
+      "count(*) as trades, sum(shares) as shares " +
       "from " +
-      "(select execs.sys_order_id, execs.EXECUTED_QTY * execs.EXEC_PRICE as notional, execs.EXECUTED_QTY as shares, " +
-      "execs.TRD_DATE, case when coalesce(root_exec.flow_cat,root.flow_cat) is null then 'UNKNOWN' else " +
+      "(select execs.sys_order_id, execs.EXECUTED_QTY * execs.EXEC_PRICE as notional, " +
+      "execs.EXECUTED_QTY as shares, execs.TRD_DATE, " +
+      "case when coalesce(root_exec.flow_cat,root.flow_cat) is null then 'UNKNOWN' else " +
       "coalesce(root_exec.flow_cat,root.flow_cat) end as ROOT_FLOW_CAT " +
       "from EXEC_DETAILS as execs left join " +
       "( select distinct TRD_DATE,glb_root_order_id,flow_cat " +
       "from EXEC_DETAILS where TRD_DATE in ('20160325','20160413' ) " +
       "and (PRODUCT_CAT_CD is null or PRODUCT_CAT_CD not in ('OPT','FUT','MLEG')) " +
       "and (exec_price_curr_cd = 'USD' OR exec_price_curr_cd is null) " +
-      "and sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137') " +
-      "and sys_order_id = glb_root_order_id and sys_src_sys_id = glb_root_src_sys_id )root_exec on " +
-      "execs.trd_date=root_exec.trd_date and execs.glb_root_order_id=root_exec.glb_root_order_id left join " +
-      "( select distinct TRD_DATE, glb_root_order_id,flow_cat " +
+      "and sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185'," +
+      "'7','153','163133','80','51','139','137') " +
+      "and sys_order_id = glb_root_order_id " +
+      "and sys_src_sys_id = glb_root_src_sys_id )root_exec on " +
+      "execs.trd_date=root_exec.trd_date and execs.glb_root_order_id=root_exec.glb_root_order_id " +
+      "left join " +
+      "(select distinct TRD_DATE, glb_root_order_id,flow_cat " +
       "from ORDERS_DETAILS T " +
       "where T.sys_order_id = T.glb_root_order_id " +
       "and T.sys_src_sys_id = T.glb_root_src_sys_id " +
-      "and T.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137') " +
+      "and T.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7'," +
+      "'153','163133','80','51','139','137') " +
       "and T.TRD_DATE in ('20160325','20160413' ) " +
       "and (T.CURR_CD = 'USD' or T.CURR_CD is null) " +
       "and (T.PRODUCT_CAT_CD is null or T.PRODUCT_CAT_CD not in ('OPT', 'FUT','MLEG')) ) root on " +
       "execs.trd_date=root.trd_date and execs.glb_root_order_id=root.glb_root_order_id " +
       "where execs.LEAF_EXEC_FG = 'Y' " +
       "and execs.event_type_cd = 'FILLED_CONF' " +
-      "and execs.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137') " +
+      "and execs.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7'," +
+      "'153','163133','80','51','139','137') " +
       "and execs.SYS_ORDER_STAT_CD in ('2','1') " +
       "and execs.TRD_DATE in ('20160325','20160413' ) " +
       "and (execs.PRODUCT_CAT_CD is null or execs.PRODUCT_CAT_CD not in ('OPT', 'FUT','MLEG')) " +
       "and (execs.exec_price_curr_cd = 'USD' or execs.exec_price_curr_cd = null) ) " +
       "Aggregated group by TRD_DATE, ROOT_FLOW_CAT order by TRD_DATE ) " +
       "union all " +
-      "( select TRD_DATE, ROOT_FLOW_CAT, sum(Notional) as notional, count(*) as trades, sum (shares) as shares " +
+      "(select TRD_DATE, ROOT_FLOW_CAT, sum(Notional) as notional, " +
+      "count(*) as trades, sum (shares) as shares " +
       "from " +
-      "(select execs.sys_order_id, execs.EXECUTED_QTY * execs.EXEC_PRICE as notional, execs.EXECUTED_QTY as shares, " +
+      "(select execs.sys_order_id, execs.EXECUTED_QTY * execs.EXEC_PRICE as notional, " +
+      "execs.EXECUTED_QTY as shares, " +
       "execs.TRD_DATE, 'ALL' as ROOT_FLOW_CAT " +
       "from EXEC_DETAILS as execs " +
       "left join ( select distinct TRD_DATE,glb_root_order_id,flow_cat " +
       "from EXEC_DETAILS where TRD_DATE in ('20160325','20160413' ) " +
       "and (PRODUCT_CAT_CD is null or PRODUCT_CAT_CD not in ('OPT','FUT','MLEG')) " +
       "and (exec_price_curr_cd = 'USD' OR exec_price_curr_cd is null) " +
-      "and sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137') " +
-      "and sys_order_id = glb_root_order_id and sys_src_sys_id = glb_root_src_sys_id )root_exec on " +
-      "execs.trd_date=root_exec.trd_date and execs.glb_root_order_id=root_exec.glb_root_order_id left join " +
+      "and sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7'," +
+      "'153','163133','80','51','139','137') " +
+      "and sys_order_id = glb_root_order_id and sys_src_sys_id = glb_root_src_sys_id) " +
+      "root_exec on " +
+      "execs.trd_date=root_exec.trd_date and " +
+      "execs.glb_root_order_id=root_exec.glb_root_order_id left join " +
       "( select distinct TRD_DATE, glb_root_order_id,flow_cat " +
       "from ORDERS_DETAILS T " +
       "where T.sys_order_id = T.glb_root_order_id " +
       "and T.sys_src_sys_id = T.glb_root_src_sys_id " +
-      "and T.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137') " +
+      "and T.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7'," +
+      "'153','163133','80','51','139','137') " +
       "and T.TRD_DATE in ('20160325','20160413' ) " +
       "and (T.CURR_CD = 'USD' or T.CURR_CD is null) " +
       "and (T.PRODUCT_CAT_CD is null or T.PRODUCT_CAT_CD not in ('OPT', 'FUT','MLEG')) ) root on " +
       "execs.trd_date=root.trd_date and execs.glb_root_order_id=root.glb_root_order_id " +
       "where execs.LEAF_EXEC_FG = 'Y' " +
       "and execs.event_type_cd = 'FILLED_CONF' " +
-      "and execs.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7','153','163133','80','51','139','137') " +
+      "and execs.sys_src_sys_id in ('93', '7', '70', '115' ,'6','150','189','31','157','185','7'," +
+      "'153','163133','80','51','139','137') " +
       "and execs.SYS_ORDER_STAT_CD in ('2','1') " +
       "and execs.TRD_DATE in ('20160325','20160413' ) " +
       "and (execs.PRODUCT_CAT_CD is null or execs.PRODUCT_CAT_CD not in ('OPT', 'FUT','MLEG')) " +
@@ -194,7 +216,7 @@ object CTQueries {
   def exec_details_df(sqlContext: SQLContext): DataFrame =
     sqlContext.read.format("com.databricks.spark.csv")
       .option("header", "true")
-      .option("inferSchema","false")
+      .option("inferSchema", "false")
       .option("nullValue", "NULL")
       .load(s"${snc.getConf("dataFilesLocation")}/EXEC_DETAILS.dat")
 
@@ -254,12 +276,14 @@ object CTQueries {
 
   val exec_details_create_ddl =
     "create table exec_details " +
-        "(EXEC_DID BIGINT,SYS_EXEC_VER INTEGER,SYS_EXEC_ID VARCHAR(64),TRD_DATE VARCHAR(20),ALT_EXEC_ID VARCHAR(64)," +
-        "SYS_EXEC_STAT VARCHAR(20),DW_EXEC_STAT VARCHAR(20),ORDER_OWNER_FIRM_ID VARCHAR(20),TRDR_SYS_LOGON_ID VARCHAR(64)," +
-        "CONTRA_BROKER_MNEMONIC VARCHAR(20),SIDE VARCHAR(20),TICKER_SYMBOL VARCHAR(32),SYS_SECURITY_ALT_ID VARCHAR(64)," +
-        "PRODUCT_CAT_CD VARCHAR(20),LAST_MKT VARCHAR(20),EXECUTED_QTY DECIMAL(18, 4),EXEC_PRICE DECIMAL( 38, 18)," +
-        "EXEC_PRICE_CURR_CD VARCHAR(20),EXEC_CAPACITY VARCHAR(20),CLIENT_ACCT_ID BIGINT,FIRM_ACCT_ID BIGINT," +
-        "AVG_PRICE_ACCT_ID BIGINT,OCEAN_ACCT_ID BIGINT,EXEC_CNTRY_CD VARCHAR(20),CMSN VARCHAR(20),COMMENT_TXT VARCHAR(2000)," +
+        "(EXEC_DID BIGINT,SYS_EXEC_VER INTEGER,SYS_EXEC_ID VARCHAR(64),TRD_DATE VARCHAR(20)," +
+        "ALT_EXEC_ID VARCHAR(64), SYS_EXEC_STAT VARCHAR(20),DW_EXEC_STAT VARCHAR(20)," +
+        "ORDER_OWNER_FIRM_ID VARCHAR(20),TRDR_SYS_LOGON_ID VARCHAR(64), CONTRA_BROKER_MNEMONIC " +
+        "VARCHAR(20),SIDE VARCHAR(20),TICKER_SYMBOL VARCHAR(32),SYS_SECURITY_ALT_ID VARCHAR(64)," +
+        "PRODUCT_CAT_CD VARCHAR(20),LAST_MKT VARCHAR(20),EXECUTED_QTY DECIMAL(18, 4)," +
+        "EXEC_PRICE DECIMAL( 38, 18), EXEC_PRICE_CURR_CD VARCHAR(20),EXEC_CAPACITY VARCHAR(20)," +
+        "CLIENT_ACCT_ID BIGINT,FIRM_ACCT_ID BIGINT, AVG_PRICE_ACCT_ID BIGINT,OCEAN_ACCT_ID " +
+        "BIGINT,EXEC_CNTRY_CD VARCHAR(20),CMSN VARCHAR(20),COMMENT_TXT VARCHAR(2000)," +
         "ACT_BRCH_SEQ_TXT VARCHAR(20),IGNORE_CD VARCHAR(20),SRC_SYS VARCHAR(20),EXEC_TYPE_CD VARCHAR(20)," +
         "LIQUIDITY_CD VARCHAR(20),ASK_PRICE DECIMAL( 38, 18),ASK_QTY DECIMAL(18, 4),TRD_REPORT_ASOF_DATE VARCHAR(20)," +
         "BID_PRICE DECIMAL( 38, 18),BID_QTY DECIMAL(18, 4),CROSS_ID VARCHAR(64),NYSE_SUBREPORT_TYPE VARCHAR(20)," +
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/CTTestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/CTTestUtil.scala
index 4855fdf0b5..9cff218570 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/CTTestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/CTTestUtil.scala
@@ -19,20 +19,13 @@ package io.snappydata.hydra.ct
 
 import java.io.PrintWriter
 
-import io.snappydata.hydra.TestUtil
+import io.snappydata.hydra.SnappyTestUtils
 
 import org.apache.spark.sql.{SQLContext, SnappyContext}
 
 object CTTestUtil {
 
-  def getCurrentDirectory = new java.io.File(".").getCanonicalPath
-
-  def assertQuery(snc: SnappyContext, sqlString: String, queryNum: String, pw: PrintWriter):
-  Any = {
-    pw.println(s"Query execution for $queryNum")
-    val df = snc.sql(sqlString)
-    pw.println("Number of Rows for  : " + sqlString + " is :" + df.count())
-  }
+  def getCurrentDirectory: String = new java.io.File(".").getCanonicalPath
 
   def createReplicatedRowTables(snc: SnappyContext): Unit = {
     snc.sql(CTQueries.orders_details_create_ddl)
@@ -47,13 +40,16 @@ object CTTestUtil {
   def createPartitionedRowTables(snc: SnappyContext, redundancy: String): Unit = {
     snc.sql(CTQueries.orders_details_create_ddl + " partition by (SINGLE_ORDER_DID) buckets '11' " +
         "redundancy '" + redundancy + "'")
-    snc.sql(CTQueries.exec_details_create_ddl + " partition by (EXEC_DID) buckets '11' redundancy '" + redundancy + "'")
+    snc.sql(CTQueries.exec_details_create_ddl +  " partition by (EXEC_DID) buckets '11' " +
+        "redundancy '" + redundancy + "'")
   }
 
-  def createPersistPartitionedRowTables(snc: SnappyContext, persistenceMode: String, redundancy: String): Unit = {
-    snc.sql(CTQueries.orders_details_create_ddl + " partition by (SINGLE_ORDER_DID) buckets '11' " +
+  def createPersistPartitionedRowTables(snc: SnappyContext, persistenceMode: String, redundancy:
+  String): Unit = {
+    snc.sql(CTQueries.orders_details_create_ddl + " partition by (SINGLE_ORDER_DID) buckets '11' " +
         "redundancy '" + redundancy + "' PERSISTENT")
-    snc.sql(CTQueries.exec_details_create_ddl + " partition by (EXEC_DID) buckets '11' redundancy '" + redundancy + "' PERSISTENT")
+    snc.sql(CTQueries.exec_details_create_ddl + " partition by (EXEC_DID) buckets '11' redundancy" +
+        " '"  + redundancy + "' PERSISTENT")
   }
 
   def createColocatedRowTables(snc: SnappyContext, redundancy: String): Unit = {
@@ -63,7 +59,8 @@ object CTTestUtil {
         "(orders_details) redundancy '" + redundancy + "' buckets '11'")
   }
 
-  def createPersistColocatedTables(snc: SnappyContext, redundancy: String, persistenceMode: String): Unit = {
+  def createPersistColocatedTables(snc: SnappyContext, redundancy: String, persistenceMode:
+  String): Unit = {
     snc.sql(CTQueries.orders_details_create_ddl + " partition by (SINGLE_ORDER_DID) redundancy '"
         + redundancy + "' buckets '11' persistent")
     snc.sql(CTQueries.exec_details_create_ddl + " partition by (EXEC_DID) colocate with " +
@@ -74,11 +71,13 @@ object CTTestUtil {
   def createRowTablesWithEviction(snc: SnappyContext, redundancy: String): Unit = {
     snc.sql(CTQueries.orders_details_create_ddl + " partition by (SINGLE_ORDER_DID) buckets '11' " +
         "redundancy '" + redundancy + "'")
-    snc.sql(CTQueries.exec_details_create_ddl + " partition by (EXEC_DID) buckets '11' redundancy '" + redundancy + "'")
+    snc.sql(CTQueries.exec_details_create_ddl + " partition by (EXEC_DID) buckets '11' redundancy" +
+        " '"  + redundancy + "'")
   }
 
-  //to add eviction attributes
-  def createColocatedRowTablesWithEviction(snc: SnappyContext, redundancy: String, persistenceMode: String): Unit = {
+  // to add eviction attributes
+  def createColocatedRowTablesWithEviction(snc: SnappyContext, redundancy: String,
+      persistenceMode: String): Unit = {
     snc.sql(CTQueries.orders_details_create_ddl + " partition by (SINGLE_ORDER_DID) redundancy '"
         + redundancy + "' buckets '11' persistent")
     snc.sql(CTQueries.exec_details_create_ddl + " partition by (EXEC_DID) colocate with " +
@@ -88,13 +87,15 @@ object CTTestUtil {
   def createColumnTables(snc: SnappyContext, redundancy: String): Unit = {
     snc.sql(CTQueries.orders_details_create_ddl + " using column options(redundancy '" +
         redundancy + "')")
-    snc.sql(CTQueries.exec_details_create_ddl + " using column options(redundancy '" + redundancy + "')")
+    snc.sql(CTQueries.exec_details_create_ddl + " using column options(redundancy '" +
+        redundancy + "')")
   }
 
   def createPersistColumnTables(snc: SnappyContext, persistenceMode: String): Unit = {
     snc.sql(CTQueries.orders_details_create_ddl + " using column options(PERSISTENT '" +
         persistenceMode + "')")
-    snc.sql(CTQueries.exec_details_create_ddl + " using column options(PERSISTENT '" + persistenceMode + "')")
+    snc.sql(CTQueries.exec_details_create_ddl + " using column options(PERSISTENT '" +
+        persistenceMode + "')")
   }
 
   def createColocatedColumnTables(snc: SnappyContext, redundancy: String): Unit = {
@@ -104,21 +105,25 @@ object CTTestUtil {
         "buckets '11', redundancy '" + redundancy + "', COLOCATE_WITH 'ORDERS_DETAILS')")
   }
 
-  def createPersistColocatedColumnTables(snc: SnappyContext, redundancy: String, persistenceMode: String): Unit = {
+  def createPersistColocatedColumnTables(snc: SnappyContext, redundancy: String, persistenceMode:
+  String): Unit = {
     snc.sql(CTQueries.orders_details_create_ddl + " USING column OPTIONS (partition_by " +
-        "'SINGLE_ORDER_DID', buckets '11', PERSISTENT '" + persistenceMode + "', redundancy '" + redundancy + "') ")
+        "'SINGLE_ORDER_DID', buckets '11', PERSISTENT '" + persistenceMode + "', redundancy '" +
+        redundancy + "') ")
     snc.sql(CTQueries.exec_details_create_ddl + " USING column OPTIONS (partition_by 'EXEC_DID', " +
-        "buckets '11', PERSISTENT '" + persistenceMode + "', redundancy '" + redundancy + "',  COLOCATE_WITH 'ORDERS_DETAILS')")
+        "buckets '11', PERSISTENT '" + persistenceMode + "', redundancy '" + redundancy + "',  " +
+        "COLOCATE_WITH 'ORDERS_DETAILS')")
   }
 
   // to add eviction attributes
   def createColumnTablesWithEviction(snc: SnappyContext, redundancy: String): Unit = {
     snc.sql(CTQueries.orders_details_create_ddl + " USING column OPTIONS (partition_by " +
         "'SINGLE_ORDER_DID', buckets '11', redundancy '" + redundancy + "')")
-    snc.sql(CTQueries.exec_details_create_ddl + " USING column OPTIONS (partition_by 'EXEC_DID', buckets '11', redundancy '" + redundancy + "')")
+    snc.sql(CTQueries.exec_details_create_ddl + " USING column OPTIONS (partition_by 'EXEC_DID'," +
+        " buckets '11', redundancy '" + redundancy + "')")
   }
 
-  //to add eviction attributes
+  // to add eviction attributes
   def createColocatedColumnTablesWithEviction(snc: SnappyContext, redundancy: String): Unit = {
     snc.sql(CTQueries.orders_details_create_ddl + " USING column OPTIONS (partition_by " +
         "'SINGLE_ORDER_DID', buckets '11', redundancy '" + redundancy + "')")
@@ -139,6 +144,7 @@ object CTTestUtil {
    */
   def createAndLoadSparkTables(sqlContext: SQLContext): Unit = {
     CTQueries.orders_details_df(sqlContext).createOrReplaceTempView("orders_details")
+    // scalastyle:off println
     println(s"orders_details Table created successfully in spark")
     CTQueries.exec_details_df(sqlContext).createOrReplaceTempView("exec_details")
     println(s"exec_details Table created successfully in spark")
@@ -148,68 +154,79 @@ object CTTestUtil {
   Performs validation for tables with the queries. Returns failed queries in a string.
    */
   def executeQueries(snc: SnappyContext, tblType: String, pw: PrintWriter,
-      fullResultSetValidation: Boolean, sqlContext: SQLContext): String = {
-    TestUtil.validateFullResultSet = fullResultSetValidation
-    TestUtil.tableType = tblType
+      fullResultSetValidation: Boolean, sqlContext: SQLContext, numRowsValidation: Boolean): String
+  = {
+    SnappyTestUtils.validateFullResultSet = fullResultSetValidation
+    SnappyTestUtils.tableType = tblType
+    SnappyTestUtils.numRowsValidation = numRowsValidation
     var failedQueries = ""
-    if (TestUtil.validateFullResultSet)
+    if (SnappyTestUtils.validateFullResultSet) {
       CTTestUtil.createAndLoadSparkTables(sqlContext)
+    }
 
     for (q <- CTQueries.queries) {
+      var queryExecuted = true;
       var hasValidationFailed = false;
       q._1 match {
-        case "Q1" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query1, 1, "Q1",
+        case "Q1" => hasValidationFailed = assertQuery(snc, CTQueries.query1, 1, "Q1",
           pw, sqlContext)
-        case "Q2" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query2, 1, "Q2",
+        case "Q2" => hasValidationFailed = assertQuery(snc, CTQueries.query2, 1, "Q2",
           pw, sqlContext)
-        case "Q3" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query3, 1, "Q3",
+        case "Q3" => hasValidationFailed = assertQuery(snc, CTQueries.query3, 1, "Q3",
           pw, sqlContext)
-        case "Q4" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query4, 1, "Q4",
+        case "Q4" => hasValidationFailed = assertQuery(snc, CTQueries.query4, 1, "Q4",
           pw, sqlContext)
-        case "Q5" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query5, 1, "Q5",
+        case "Q5" => hasValidationFailed = assertQuery(snc, CTQueries.query5, 1, "Q5",
           pw, sqlContext)
-        case "Q6" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query6, 5, "Q6",
+        case "Q6" => hasValidationFailed = assertQuery(snc, CTQueries.query6, 5, "Q6",
           pw, sqlContext)
-        case "Q7" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query7, 5, "Q7",
+        case "Q7" => hasValidationFailed = assertQuery(snc, CTQueries.query7, 5, "Q7",
           pw, sqlContext)
-        case "Q8" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query8, 5, "Q8",
+        case "Q8" => hasValidationFailed = assertQuery(snc, CTQueries.query8, 5, "Q8",
           pw, sqlContext)
-        case "Q9" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query9, 1, "Q9",
+        case "Q9" => hasValidationFailed = assertQuery(snc, CTQueries.query9, 1, "Q9",
           pw, sqlContext)
-        case "Q10" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query10, 1, "Q10",
+        case "Q10" => hasValidationFailed = assertQuery(snc, CTQueries.query10, 1, "Q10",
           pw, sqlContext)
-        case "Q11" => hasValidationFailed = TestUtil.assertJoin(snc,CTQueries.query11, 2706, "Q11",
+        case "Q11" => hasValidationFailed = assertJoin(snc, CTQueries.query11, 2706, "Q11",
           pw, sqlContext)
-        case "Q12" => hasValidationFailed = TestUtil.assertJoin(snc,CTQueries.query12, 150, "Q12",
+        case "Q12" => hasValidationFailed = assertJoin(snc, CTQueries.query12, 150, "Q12",
           pw, sqlContext)
-        case "Q13" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query13, 149, "Q13",
+        case "Q13" => hasValidationFailed = assertQuery(snc, CTQueries.query13, 149, "Q13",
           pw, sqlContext)
-        case "Q14" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query14, 149, "Q14",
+        case "Q14" => hasValidationFailed = assertQuery(snc, CTQueries.query14, 149, "Q14",
           pw, sqlContext)
-        case "Q15" => hasValidationFailed = TestUtil.assertJoin(snc,CTQueries.query15, 2620, "Q15",
+        case "Q15" => hasValidationFailed = assertJoin(snc, CTQueries.query15, 2620, "Q15",
           pw, sqlContext)
-        case "Q16" => hasValidationFailed = TestUtil.assertJoin(snc,CTQueries.query16, 150, "Q16",
+        case "Q16" => hasValidationFailed = assertJoin(snc, CTQueries.query16, 150, "Q16",
           pw, sqlContext)
-        case "Q17" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query17, 2, "Q17",
+        case "Q17" => hasValidationFailed = assertQuery(snc, CTQueries.query17, 2, "Q17",
           pw, sqlContext)
-        case "Q18" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query18, 0, "Q18",
+        case "Q18" => hasValidationFailed = assertQuery(snc, CTQueries.query18, 0, "Q18",
           pw, sqlContext)
-        case "Q19" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query19, 47, "Q19",
+        case "Q19" => hasValidationFailed = assertQuery(snc, CTQueries.query19, 47, "Q19",
           pw, sqlContext)
-        case "Q20" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query20, 100, "Q20",
+        case "Q20" => hasValidationFailed = assertQuery(snc, CTQueries.query20, 100, "Q20",
           pw, sqlContext)
-        case "Q21" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query21, 2, "Q21",
+        case "Q21" => hasValidationFailed = assertQuery(snc, CTQueries.query21, 2, "Q21",
           pw, sqlContext)
-        case "Q22" => hasValidationFailed = TestUtil.assertJoin(snc,CTQueries.query22, 1, "Q22",
+        case "Q22" => hasValidationFailed = assertJoin(snc, CTQueries.query22, 1, "Q22",
           pw, sqlContext)
-        //case "Q23" => hasValidationFailed = TestUtil.assertJoin(snc,CTQueries.query23,0,"Q23",
+        // case "Q23" => hasValidationFailed = assertJoin(snc, CTQueries.query23,0,"Q23",
         //   pw,sqlContext)
-        case "Q24" => hasValidationFailed = TestUtil.assertQuery(snc,CTQueries.query24, 999, "Q24",
+        case "Q24" => hasValidationFailed = assertQuery(snc, CTQueries.query24, 999, "Q24",
           pw, sqlContext)
-        case _ => pw.println(s"Query not be executed ${q._1}")
+        case _ =>
+          // scalastyle:off println
+          pw.println(s"Query ${q._1} will not be executed.")
+          queryExecuted = false
+      }
+      if (queryExecuted) {
+        pw.println(s"Execution completed for query ${q._1}")
+      }
+      if (hasValidationFailed) {
+        failedQueries = SnappyTestUtils.addToFailedQueryList(failedQueries, q._1)
       }
-      if (hasValidationFailed)
-        failedQueries = TestUtil.addToFailedQueryList(failedQueries, q._1)
     }
     return failedQueries;
   }
@@ -219,5 +236,50 @@ object CTTestUtil {
     snc.sql("drop table if exists orders_details")
   }
 
+  /*
+  Executes the join query, matches the result with expected result, returns false if the query
+  validation has failed.
+  */
+  def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
+  PrintWriter, sqlContext: SQLContext): Boolean = {
+    var validationFailed = false
+    snc.sql("set spark.sql.crossJoin.enabled = true")
+    validationFailed = assertQuery(snc, sqlString, numRows, queryNum, pw, sqlContext)
+    return validationFailed
+  }
+
+  /*
+   Executes the query, matches the result with expected result, returns false if the query
+   validation has failed.
+   */
+  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
+  PrintWriter, sqlContext: SQLContext): Boolean = {
+    var validationFailed = false
+    val df = snc.sql(sqlString)
+    val count = df.count
+    println(s"Query $queryNum")
+    df.explain(true)
+    if (SnappyTestUtils.numRowsValidation) {
+      pw.println(s"No. rows in resultset for query ${queryNum} is : ${count} for " +
+          s"${SnappyTestUtils.tableType} table")
+      if (count != numRows) {
+        pw.println(s"Result mismatch for query ${queryNum} : found ${count} rows but expected " +
+            s"${numRows} rows.")
+        validationFailed = true
+      }
+      pw.flush()
+    }
+    if (SnappyTestUtils.validateFullResultSet) {
+      validationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, sqlString, queryNum,
+        pw, sqlContext)
+    }
+
+    if (validationFailed) {
+      pw.println(s"Failed Query : " + sqlString + "\n Table Type : " + SnappyTestUtils.tableType
+          + "\n")
+    }
+    pw.flush()
+    return validationFailed
+  }
 }
 
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
index d311dd235e..e7caafe62d 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
@@ -46,6 +46,7 @@ object ValidateCTQueriesApp {
     CTQueries.snc = snc
     val tableType = args(1)
     val fullResultSetValidation: Boolean = args(2).toBoolean
+    val numRowsValidation: Boolean = args(3).toBoolean
     pw.println(s"Validation for queries with ${tableType} tables started")
     if (fullResultSetValidation) {
       pw.println(s"Test will perform fullResultSetValidation")
@@ -55,7 +56,7 @@ object ValidateCTQueriesApp {
     }
     val startTime = System.currentTimeMillis
     val failedQueries = CTTestUtil.executeQueries(snc, tableType, pw, fullResultSetValidation,
-      sqlContext)
+      sqlContext, numRowsValidation)
     val endTime = System.currentTimeMillis
     val totalTime = (endTime - startTime) / 1000
     pw.println(s"Total time for execution is :: ${totalTime} seconds.")
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
index 4ff5932b54..ae55367f1b 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
@@ -42,16 +42,20 @@ class ValidateCTQueriesJob extends SnappySQLJob {
       snc.setConf("dataFilesLocation", dataFilesLocation)
       CTQueries.snc = snc
       pw.println(s"Validation for $tableType tables started in snappy Job")
+      val numRowsValidation: Boolean = jobConfig.getBoolean("numRowsValidation")
       val fullResultSetValidation: Boolean = jobConfig.getBoolean("fullResultSetValidation")
       val sc = SparkContext.getOrCreate()
       val sqlContext = SQLContext.getOrCreate(sc)
+      if (numRowsValidation) {
+        pw.println(s"Test will perform numRowsValidation")
+      }
       if (fullResultSetValidation)
         pw.println(s"Test will perform fullResultSetValidation")
       else
         pw.println(s"Test will not perform fullResultSetValidation")
       val startTime = System.currentTimeMillis
       val failedQueries = CTTestUtil.executeQueries(snc, tableType, pw, fullResultSetValidation,
-        sqlContext)
+        sqlContext, numRowsValidation)
       val endTime = System.currentTimeMillis
       val totalTime = (endTime - startTime) / 1000
       pw.println(s"Total time for execution is :: ${totalTime} seconds.")
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
index f74421b3c8..7c6a4e522b 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
@@ -16,164 +16,14 @@
  */
 package io.snappydata.hydra.northwind
 
-import java.io.{File, PrintWriter}
+import java.io.PrintWriter
+
 import io.snappydata.hydra.SnappyTestUtils
-import scala.io.Source
 
-import org.apache.spark.sql.catalyst.encoders.RowEncoder
 import org.apache.spark.sql._
 
 object NWTestUtil {
 
-  def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
-                 tableType: String, pw: PrintWriter): Any = {
-    snc.sql("set spark.sql.crossJoin.enabled = true")
-    val df = snc.sql(sqlString)
-    // scalastyle:off println
-    println(s"Query $queryNum")
-    df.explain(true)
-    pw.println(s"Query ${queryNum} \n df.count for join query is : ${df.count} \n Expected " +
-        s"numRows : ${numRows} \n Table Type : ${tableType}")
-    println(s"Query ${queryNum} \n df.count for join query is : ${df.count} \n Expected numRows :" +
-        s" ${numRows} \n Table Type : ${tableType}")
-    // scalastyle:on println
-    assert(df.count() == numRows,
-      s"Mismatch got for query ${queryNum} : df.count -> ${df.count()} but expected numRows " +
-          s"-> $numRows " +
-          s" for query = $sqlString Table Type : $tableType\n" +
-          s"plan : ${df.explain(true)} ")
-    pw.flush()
-  }
-
-  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
-                  tableType: String, pw: PrintWriter): Any = {
-    val df = snc.sql(sqlString)
-    // scalastyle:off println
-    println(s"Query $queryNum")
-    df.explain(true)
-    pw.println(s"Query ${queryNum} \n df.count is : ${df.count} \n Expected numRows : ${numRows} " +
-        s"\n Table Type : ${tableType}")
-    println(s"Query ${queryNum} \n df.count is : ${df.count} \n Expected numRows : ${numRows} \n " +
-        s"Table Type : ${tableType}")
-    // scalastyle:on println
-    assert(df.count() == numRows,
-      s"Mismatch got for query ${queryNum} : df.count -> ${df.count()} but expected numRows " +
-          s"-> $numRows for query = $sqlString Table Type : $tableType")
-    pw.flush()
-  }
-
-  def assertJoinFullResultSet(snc: SnappyContext, sqlString: String, numRows: Int, queryNum:
-  String, tableType: String, pw: PrintWriter, sqlContext: SQLContext): Any = {
-    snc.sql("set spark.sql.crossJoin.enabled = true")
-    sqlContext.sql("set spark.sql.crossJoin.enabled = true")
-    assertQueryFullResultSet(snc, sqlString, numRows, queryNum, tableType, pw, sqlContext)
-  }
-
-  def dataTypeConverter(row: Row): Row = {
-    val md = row.toSeq.map {
-      // case d: Double => "%18.1f".format(d).trim().toDouble
-      case d: Double => math.floor(d * 10.0 + 0.5) / 10.0
-      case de: BigDecimal => {
-        de.setScale(2, BigDecimal.RoundingMode.HALF_UP)
-      }
-      case i: Integer => {
-        i
-      }
-      case v => v
-    }
-    Row.fromSeq(md)
-  }
-
-  def writeToFile(df: DataFrame, dest: String, snc: SnappyContext): Unit = {
-    import snc.implicits._
-    df.map(dataTypeConverter)(RowEncoder(df.schema))
-        .map(row => {
-          val sb = new StringBuilder
-          row.toSeq.foreach {
-            case e if e == null =>
-              sb.append("NULL").append(",")
-            case e =>
-              sb.append(e.toString).append(",")
-          }
-          sb.toString()
-        }).write.format("org.apache.spark.sql.execution.datasources.csv.CSVFileFormat").option(
-      "header", false).save(dest)
-  }
-
-  protected def getTempDir(dirName: String): String = {
-    val log: File = new File(".")
-    var dest: String = null
-    val dirString = log.getCanonicalPath;
-    if (dirName.equals("sparkQueryFiles")) {
-      val logDir = log.listFiles.filter(_.getName.equals("snappyleader.log"))
-      if (!logDir.isEmpty) {
-        val leaderLogFile: File = logDir.iterator.next()
-        if (leaderLogFile.exists()) dest = dirString + File.separator + ".." + File.separator + "" +
-            ".." + File.separator + dirName
-      }
-      else dest = dirString + File.separator + ".." + File.separator + dirName
-    }
-    else dest = log.getCanonicalPath + File.separator + dirName
-    val tempDir: File = new File(dest)
-    if (!tempDir.exists) tempDir.mkdir()
-    return tempDir.getAbsolutePath
-  }
-
-  def assertQueryFullResultSet(snc: SnappyContext, sqlString: String, numRows: Int, queryNum:
-  String, tableType: String, pw: PrintWriter, sqlContext: SQLContext): Any = {
-    // scalastyle:off println
-    var snappyDF = snc.sql(sqlString)
-    var sparkDF = sqlContext.sql(sqlString);
-    val snappyQueryFileName = s"Snappy_${queryNum}.out"
-    val sparkQueryFileName = s"Spark_${queryNum}.out"
-    val snappyDest: String = getTempDir("snappyQueryFiles") + File.separator + snappyQueryFileName
-    val sparkDest: String = getTempDir("sparkQueryFiles") + File.separator + sparkQueryFileName
-    val sparkFile: File = new java.io.File(sparkDest)
-    val snappyFile = new java.io.File(snappyDest)
-    val col1 = sparkDF.schema.fieldNames(0)
-    val col = sparkDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
-    if (snappyFile.listFiles() == null) {
-      snappyDF = snappyDF.coalesce(1).orderBy(col1, col: _*)
-      writeToFile(snappyDF, snappyDest, snc)
-      pw.println(s"${queryNum} Result Collected in file $snappyDest")
-    }
-    if (sparkFile.listFiles() == null) {
-      sparkDF = sparkDF.coalesce(1).orderBy(col1, col: _*)
-      writeToFile(sparkDF, sparkDest, snc)
-      pw.println(s"${queryNum} Result Collected in file $sparkDest")
-    }
-    val expectedFile = sparkFile.listFiles.filter(_.getName.endsWith(".csv"))
-    val actualFile = snappyFile.listFiles.filter(_.getName.endsWith(".csv"))
-    val expectedLineSet = Source.fromFile(expectedFile.iterator.next()).getLines()
-    val actualLineSet = Source.fromFile(actualFile.iterator.next()).getLines
-    var numLines = 0
-    while (expectedLineSet.hasNext && actualLineSet.hasNext) {
-      val expectedLine = expectedLineSet.next()
-      val actualLine = actualLineSet.next()
-      if (!actualLine.equals(expectedLine)) {
-        pw.println(s"\n** For ${queryNum} result mismatch observed**")
-        pw.println(s"\nExpected Result:\n $expectedLine")
-        pw.println(s"\nActual Result:\n $actualLine")
-        pw.println(s"\nQuery =" + sqlString + " Table Type : " + tableType)
-        /* assert(assertion = false, s"\n** For $queryNum result mismatch observed** \n" +
-            s"Expected Result \n: $expectedLine \n" +
-            s"Actual Result   \n: $actualLine \n" +
-            s"Query =" + sqlString + " Table Type : " + tableType)
-         */
-        // Commented due to Q37 failure by just the difference of 0.1 in actual and expected value
-      }
-      numLines += 1
-    }
-    if (actualLineSet.hasNext || expectedLineSet.hasNext) {
-      pw.println(s"\nFor ${queryNum} result count mismatch observed")
-      assert(assertion = false, s"\nFor $queryNum result count mismatch observed")
-    }
-    assert(numLines == numRows, s"\nFor $queryNum result count mismatch " +
-        s"observed: Expected=$numRows, Got=$numLines")
-    pw.flush()
-    // scalastyle:on println
-  }
-
   def createAndLoadReplicatedTables(snc: SnappyContext): Unit = {
 
     snc.sql(NWQueries.regions_table)
@@ -210,204 +60,275 @@ object NWTestUtil {
     NWQueries.employee_territories(snc).write.insertInto("employee_territories")
   }
 
+  /*
+  Method for validating only number of rows with default data for northwind schema size data
+  */
   def validateQueries(snc: SnappyContext, tableType: String, pw: PrintWriter): Unit = {
     for (q <- NWQueries.queries) {
       q._1 match {
-        case "Q1" => assertQuery(snc, NWQueries.Q1, 8, "Q1", tableType, pw)
-        case "Q2" => assertQuery(snc, NWQueries.Q2, 91, "Q2", tableType, pw)
-        case "Q3" => assertQuery(snc, NWQueries.Q3, 830, "Q3", tableType, pw)
-        case "Q4" => assertQuery(snc, NWQueries.Q4, 9, "Q4", tableType, pw)
-        case "Q5" => assertQuery(snc, NWQueries.Q5, 9, "Q5", tableType, pw)
-        case "Q6" => assertQuery(snc, NWQueries.Q6, 9, "Q6", tableType, pw)
-        case "Q7" => assertQuery(snc, NWQueries.Q7, 9, "Q7", tableType, pw)
-        case "Q8" => assertQuery(snc, NWQueries.Q8, 6, "Q8", tableType, pw)
-        case "Q9" => assertQuery(snc, NWQueries.Q9, 3, "Q9", tableType, pw)
-        case "Q10" => assertQuery(snc, NWQueries.Q10, 2, "Q10", tableType, pw)
-        case "Q11" => assertQuery(snc, NWQueries.Q11, 4, "Q11", tableType, pw)
-        case "Q12" => assertQuery(snc, NWQueries.Q12, 2, "Q12", tableType, pw)
-        case "Q13" => assertQuery(snc, NWQueries.Q13, 2, "Q13", tableType, pw)
-        case "Q14" => assertQuery(snc, NWQueries.Q14, 69, "Q14", tableType, pw)
-        case "Q15" => assertQuery(snc, NWQueries.Q15, 5, "Q15", tableType, pw)
-        case "Q16" => assertQuery(snc, NWQueries.Q16, 8, "Q16", tableType, pw)
-        case "Q17" => assertQuery(snc, NWQueries.Q17, 3, "Q17", tableType, pw)
-        case "Q18" => assertQuery(snc, NWQueries.Q18, 9, "Q18", tableType, pw)
-        case "Q19" => assertQuery(snc, NWQueries.Q19, 13, "Q19", tableType, pw)
-        case "Q20" => assertQuery(snc, NWQueries.Q20, 1, "Q20", tableType, pw)
-        case "Q21" => assertQuery(snc, NWQueries.Q21, 1, "Q21", tableType, pw)
-        case "Q22" => assertQuery(snc, NWQueries.Q22, 1, "Q22", tableType, pw)
-        case "Q23" => assertQuery(snc, NWQueries.Q23, 1, "Q23", tableType, pw)
-        case "Q24" => assertQuery(snc, NWQueries.Q24, 4, "Q24", tableType, pw)
-        case "Q25" => assertJoin(snc, NWQueries.Q25, 1, "Q25", tableType, pw)
-        case "Q26" => assertJoin(snc, NWQueries.Q26, 86, "Q26", tableType, pw)
-        case "Q27" => assertJoin(snc, NWQueries.Q27, 9, "Q27", tableType, pw)
-        case "Q28" => assertJoin(snc, NWQueries.Q28, 12, "Q28", tableType, pw)
-        case "Q29" => assertJoin(snc, NWQueries.Q29, 8, "Q29", tableType, pw)
-        case "Q30" => assertJoin(snc, NWQueries.Q30, 8, "Q30", tableType, pw)
-        case "Q31" => assertJoin(snc, NWQueries.Q31, 830, "Q31", tableType, pw)
-        case "Q32" => assertJoin(snc, NWQueries.Q32, 8, "Q32", tableType, pw)
-        case "Q33" => assertJoin(snc, NWQueries.Q33, 37, "Q33", tableType, pw)
-        case "Q34" => assertJoin(snc, NWQueries.Q34, 5, "Q34", tableType, pw)
-        case "Q35" => assertJoin(snc, NWQueries.Q35, 3, "Q35", tableType, pw)
-        case "Q36" => assertJoin(snc, NWQueries.Q36, 290, "Q36", tableType, pw)
-        case "Q37" => assertJoin(snc, NWQueries.Q37, 77, "Q37", tableType, pw)
-        case "Q38" => assertJoin(snc, NWQueries.Q38, 2155, "Q38", tableType, pw)
-        case "Q39" => assertJoin(snc, NWQueries.Q39, 9, "Q39", tableType, pw)
-        case "Q40" => assertJoin(snc, NWQueries.Q40, 830, "Q40", tableType, pw)
-        case "Q41" => assertJoin(snc, NWQueries.Q41, 2155, "Q41", tableType, pw)
-        case "Q42" => assertJoin(snc, NWQueries.Q42, 22, "Q42", tableType, pw)
-        case "Q43" => assertJoin(snc, NWQueries.Q43, 830, "Q43", tableType, pw)
-        case "Q44" => assertJoin(snc, NWQueries.Q44, 830, "Q44", tableType, pw) // LeftSemiJoinHash
-        case "Q45" => assertJoin(snc, NWQueries.Q45, 1788650, "Q45", tableType, pw)
-        case "Q46" => assertJoin(snc, NWQueries.Q46, 1788650, "Q46", tableType, pw)
-        case "Q47" => assertJoin(snc, NWQueries.Q47, 1788650, "Q47", tableType, pw)
-        case "Q48" => assertJoin(snc, NWQueries.Q48, 1788650, "Q48", tableType, pw)
-        case "Q49" => assertJoin(snc, NWQueries.Q49, 1788650, "Q49", tableType, pw)
-        case "Q50" => assertJoin(snc, NWQueries.Q50, 2155, "Q50", tableType, pw)
-        case "Q51" => assertJoin(snc, NWQueries.Q51, 2155, "Q51", tableType, pw)
-        case "Q52" => assertJoin(snc, NWQueries.Q52, 2155, "Q52", tableType, pw)
-        case "Q53" => assertJoin(snc, NWQueries.Q53, 2155, "Q53", tableType, pw)
-        case "Q54" => assertJoin(snc, NWQueries.Q54, 2155, "Q54", tableType, pw)
-        case "Q55" => assertJoin(snc, NWQueries.Q55, 21, "Q55", tableType, pw)
-        case "Q56" => assertJoin(snc, NWQueries.Q56, 8, "Q56", tableType, pw)
-        case "Q57" => assertJoin(snc, NWQueries.Q57, 120, "Q57", tableType, pw)
-        case "Q58" => assertJoin(snc, NWQueries.Q58, 1, "Q58", tableType, pw)
-        case "Q59" => assertJoin(snc, NWQueries.Q59, 1, "Q59", tableType, pw)
-        case "Q60" => assertJoin(snc, NWQueries.Q60, 947, "Q60", tableType, pw)
+        case "Q1" => SnappyTestUtils.assertQuery(snc, NWQueries.Q1, 8, "Q1", pw)
+        case "Q2" => SnappyTestUtils.assertQuery(snc, NWQueries.Q2, 91, "Q2", pw)
+        case "Q3" => SnappyTestUtils.assertQuery(snc, NWQueries.Q3, 830, "Q3", pw)
+        case "Q4" => SnappyTestUtils.assertQuery(snc, NWQueries.Q4, 9, "Q4", pw)
+        case "Q5" => SnappyTestUtils.assertQuery(snc, NWQueries.Q5, 9, "Q5", pw)
+        case "Q6" => SnappyTestUtils.assertQuery(snc, NWQueries.Q6, 9, "Q6", pw)
+        case "Q7" => SnappyTestUtils.assertQuery(snc, NWQueries.Q7, 9, "Q7", pw)
+        case "Q8" => SnappyTestUtils.assertQuery(snc, NWQueries.Q8, 6, "Q8", pw)
+        case "Q9" => SnappyTestUtils.assertQuery(snc, NWQueries.Q9, 3, "Q9", pw)
+        case "Q10" => SnappyTestUtils.assertQuery(snc, NWQueries.Q10, 2, "Q10", pw)
+        case "Q11" => SnappyTestUtils.assertQuery(snc, NWQueries.Q11, 4, "Q11", pw)
+        case "Q12" => SnappyTestUtils.assertQuery(snc, NWQueries.Q12, 2, "Q12", pw)
+        case "Q13" => SnappyTestUtils.assertQuery(snc, NWQueries.Q13, 2, "Q13", pw)
+        case "Q14" => SnappyTestUtils.assertQuery(snc, NWQueries.Q14, 69, "Q14", pw)
+        case "Q15" => SnappyTestUtils.assertQuery(snc, NWQueries.Q15, 5, "Q15", pw)
+        case "Q16" => SnappyTestUtils.assertQuery(snc, NWQueries.Q16, 8, "Q16", pw)
+        case "Q17" => SnappyTestUtils.assertQuery(snc, NWQueries.Q17, 3, "Q17", pw)
+        case "Q18" => SnappyTestUtils.assertQuery(snc, NWQueries.Q18, 9, "Q18", pw)
+        case "Q20" => SnappyTestUtils.assertQuery(snc, NWQueries.Q20, 1, "Q20", pw)
+        case "Q21" => SnappyTestUtils.assertQuery(snc, NWQueries.Q21, 1, "Q21", pw)
+        case "Q22" => SnappyTestUtils.assertQuery(snc, NWQueries.Q22, 1, "Q22", pw)
+        case "Q23" => SnappyTestUtils.assertQuery(snc, NWQueries.Q23, 1, "Q23", pw)
+        case "Q24" => SnappyTestUtils.assertQuery(snc, NWQueries.Q24, 4, "Q24", pw)
+        case "Q25" => SnappyTestUtils.assertJoin(snc, NWQueries.Q25, 1, "Q25", pw)
+        case "Q26" => SnappyTestUtils.assertJoin(snc, NWQueries.Q26, 86, "Q26", pw)
+        case "Q27" => SnappyTestUtils.assertJoin(snc, NWQueries.Q27, 9, "Q27", pw)
+        case "Q28" => SnappyTestUtils.assertJoin(snc, NWQueries.Q28, 12, "Q28", pw)
+        case "Q29" => SnappyTestUtils.assertJoin(snc, NWQueries.Q29, 8, "Q29", pw)
+        case "Q30" => SnappyTestUtils.assertJoin(snc, NWQueries.Q30, 8, "Q30", pw)
+        case "Q31" => SnappyTestUtils.assertJoin(snc, NWQueries.Q31, 830, "Q31", pw)
+        case "Q32" => SnappyTestUtils.assertJoin(snc, NWQueries.Q32, 8, "Q32", pw)
+        case "Q33" => SnappyTestUtils.assertJoin(snc, NWQueries.Q33, 37, "Q33", pw)
+        case "Q34" => SnappyTestUtils.assertJoin(snc, NWQueries.Q34, 5, "Q34", pw)
+        case "Q35" => SnappyTestUtils.assertJoin(snc, NWQueries.Q35, 3, "Q35", pw)
+        case "Q36" => SnappyTestUtils.assertJoin(snc, NWQueries.Q36, 290, "Q36", pw)
+        case "Q37" => SnappyTestUtils.assertJoin(snc, NWQueries.Q37, 77, "Q37", pw)
+        case "Q38" => SnappyTestUtils.assertJoin(snc, NWQueries.Q38, 2155, "Q38", pw)
+        case "Q39" => SnappyTestUtils.assertJoin(snc, NWQueries.Q39, 9, "Q39", pw)
+        case "Q40" => SnappyTestUtils.assertJoin(snc, NWQueries.Q40, 830, "Q40", pw)
+        case "Q41" => SnappyTestUtils.assertJoin(snc, NWQueries.Q41, 2155, "Q41", pw)
+        case "Q42" => SnappyTestUtils.assertJoin(snc, NWQueries.Q42, 22, "Q42", pw)
+        case "Q43" => SnappyTestUtils.assertJoin(snc, NWQueries.Q43, 830, "Q43", pw)
+        case "Q44" => SnappyTestUtils.assertJoin(snc, NWQueries.Q44, 830, "Q44", pw) //LeftSemiJoinHash
+        case "Q45" => SnappyTestUtils.assertJoin(snc, NWQueries.Q45, 1788650, "Q45", pw)
+        case "Q46" => SnappyTestUtils.assertJoin(snc, NWQueries.Q46, 1788650, "Q46", pw)
+        case "Q47" => SnappyTestUtils.assertJoin(snc, NWQueries.Q47, 1788650, "Q47", pw)
+        case "Q48" => SnappyTestUtils.assertJoin(snc, NWQueries.Q48, 1788650, "Q48", pw)
+        case "Q49" => SnappyTestUtils.assertJoin(snc, NWQueries.Q49, 1788650, "Q49", pw)
+        case "Q50" => SnappyTestUtils.assertJoin(snc, NWQueries.Q50, 2155, "Q50", pw)
+        case "Q51" => SnappyTestUtils.assertJoin(snc, NWQueries.Q51, 2155, "Q51", pw)
+        case "Q52" => SnappyTestUtils.assertJoin(snc, NWQueries.Q52, 2155, "Q52", pw)
+        case "Q53" => SnappyTestUtils.assertJoin(snc, NWQueries.Q53, 2155, "Q53", pw)
+        case "Q54" => SnappyTestUtils.assertJoin(snc, NWQueries.Q54, 2155, "Q54", pw)
+        case "Q55" => SnappyTestUtils.assertJoin(snc, NWQueries.Q55, 21, "Q55", pw)
+        case "Q56" => SnappyTestUtils.assertJoin(snc, NWQueries.Q56, 8, "Q56", pw)
+        case "Q57" => SnappyTestUtils.assertJoin(snc, NWQueries.Q57, 120, "Q57", pw)
+        case "Q58" => SnappyTestUtils.assertJoin(snc, NWQueries.Q58, 1, "Q58", pw)
+        case "Q59" => SnappyTestUtils.assertJoin(snc, NWQueries.Q59, 1, "Q59", pw)
+        case "Q60" => SnappyTestUtils.assertJoin(snc, NWQueries.Q60, 947, "Q60", pw)
         // scalastyle:off println
-        case _ => println("OK")
+        case _ => println(s"Did not execute query ${q._1}")
         // scalastyle:on println
       }
     }
   }
 
   def validateQueriesFullResultSet(snc: SnappyContext, tableType: String, pw: PrintWriter,
-                                   sqlContext: SQLContext): Unit = {
+                                   sqlContext: SQLContext): String = {
+    SnappyTestUtils.tableType = tableType
+    var failedQueries = ""
     for (q <- NWQueries.queries) {
+      var queryExecuted = true
+      var hasValidationFailed = false
       q._1 match {
-        case "Q1" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q1, "Q1", tableType,
-          pw, sqlContext)
-        case "Q2" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q2, "Q2", tableType,
-          pw, sqlContext)
-        case "Q3" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q3, "Q3", tableType,
-          pw, sqlContext)
-        case "Q4" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q4, "Q4", tableType,
-          pw, sqlContext)
-        case "Q5" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q5, "Q5", tableType,
-          pw, sqlContext)
-        case "Q6" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q6, "Q6", tableType,
-          pw, sqlContext)
-        case "Q7" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q7, "Q7", tableType,
-          pw, sqlContext)
-        case "Q8" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q8, "Q8", tableType,
-          pw, sqlContext)
-        case "Q9" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q9, "Q9", tableType,
-          pw, sqlContext)
-        case "Q10" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q10, "Q10",
-          tableType, pw, sqlContext)
-        case "Q11" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q11, "Q11",
-          tableType, pw, sqlContext)
-        case "Q12" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q12, "Q12",
-          tableType, pw, sqlContext)
-        case "Q13" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q13, "Q13",
-          tableType, pw, sqlContext)
-        case "Q14" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q14, "Q14",
-          tableType, pw, sqlContext)
-        case "Q15" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q15, "Q15",
-          tableType, pw, sqlContext)
-        case "Q16" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q16, "Q16",
-          tableType, pw, sqlContext)
-        case "Q17" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q17, "Q17",
-          tableType, pw, sqlContext)
-        case "Q18" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q18, "Q18",
-          tableType, pw, sqlContext)
-        case "Q19" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q19, "Q19",
-          tableType, pw, sqlContext)
-        case "Q20" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q20, "Q20",
-          tableType, pw, sqlContext)
-        case "Q21" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q21, "Q21",
-          tableType, pw, sqlContext)
-        case "Q22" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q22, "Q22",
-          tableType, pw, sqlContext)
-        case "Q23" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q23, "Q23",
-          tableType, pw, sqlContext)
-        case "Q24" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q24, "Q24",
-          tableType, pw, sqlContext)
-        case "Q25" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q25, "Q25",
-          tableType, pw, sqlContext)
-        case "Q26" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q26, "Q26",
-          tableType, pw, sqlContext)
-        case "Q27" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q27, "Q27",
-          tableType, pw, sqlContext)
-        case "Q28" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q28, "Q28",
-          tableType, pw, sqlContext)
-        case "Q29" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q29, "Q29",
-          tableType, pw, sqlContext)
-        case "Q30" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q30, "Q30",
-          tableType, pw, sqlContext)
-        case "Q31" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q31, "Q31",
-          tableType, pw, sqlContext)
-        case "Q32" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q32, "Q32",
-          tableType, pw, sqlContext)
-        case "Q33" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q33, "Q33",
-          tableType, pw, sqlContext)
-        case "Q34" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q34, "Q34",
-          tableType, pw, sqlContext)
-        case "Q35" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q35, "Q35",
-          tableType, pw, sqlContext)
-        case "Q36" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q36, "Q36",
-          tableType, pw, sqlContext)
-        case "Q37" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q37, "Q37",
-          tableType, pw, sqlContext)
-        case "Q38" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q38, "Q38",
-          tableType, pw, sqlContext)
-        case "Q39" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q39, "Q39",
-          tableType, pw, sqlContext)
-        case "Q40" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q40, "Q40",
-          tableType, pw, sqlContext)
-        case "Q41" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q41, "Q41",
-          tableType, pw, sqlContext)
-        case "Q42" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q42, "Q42",
-          tableType, pw, sqlContext)
-        case "Q43" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q43, "Q43",
-          tableType, pw, sqlContext)
-        case "Q44" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q44, "Q44",
-          tableType, pw, sqlContext) // LeftSemiJoinHash
-        case "Q45" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q45, "Q45",
-          tableType, pw, sqlContext)
-        case "Q46" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q46, "Q46",
-          tableType, pw, sqlContext)
-        case "Q47" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q47, "Q47",
-          tableType, pw, sqlContext)
-        case "Q48" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q48, "Q48",
-          tableType, pw, sqlContext)
-        case "Q49" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q49, "Q49",
-          tableType, pw, sqlContext)
-        case "Q50" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q50, "Q50",
-          tableType, pw, sqlContext)
-        case "Q51" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q51, "Q51",
-          tableType, pw, sqlContext)
-        case "Q52" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q52, "Q52",
-          tableType, pw, sqlContext)
-        case "Q53" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q53, "Q53",
-          tableType, pw, sqlContext)
-        case "Q54" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q54, "Q54",
-          tableType, pw, sqlContext)
-        case "Q55" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q55, "Q55",
-          tableType, pw, sqlContext)
-        case "Q56" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q56, "Q56",
-          tableType, pw, sqlContext)
-        case "Q57" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q57, "Q57",
-          tableType, pw, sqlContext)
-        case "Q58" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q58, "Q58",
-          tableType, pw, sqlContext)
-        case "Q59" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q59, "Q59",
-          tableType, pw, sqlContext)
-        case "Q60" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q60, "Q60",
-          tableType, pw, sqlContext)
+        case "Q1" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q1, "Q1",
+             pw, sqlContext)
+        case "Q2" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q2, "Q2",
+             pw, sqlContext)
+        case "Q3" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q3, "Q3",
+             pw, sqlContext)
+        case "Q4" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q4, "Q4",
+             pw, sqlContext)
+        case "Q5" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q5, "Q5",
+             pw, sqlContext)
+        case "Q6" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q6, "Q6",
+             pw, sqlContext)
+        case "Q7" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q7, "Q7",
+             pw, sqlContext)
+        case "Q8" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q8, "Q8",
+             pw, sqlContext)
+        case "Q9" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q9, "Q9",
+             pw, sqlContext)
+        case "Q10" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q10, "Q10",
+             pw, sqlContext)
+        case "Q11" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q11, "Q11",
+             pw, sqlContext)
+        case "Q12" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q12, "Q12",
+              pw, sqlContext)
+        case "Q13" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q13, "Q13",
+             pw, sqlContext)
+        case "Q14" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q14, "Q14",
+             pw, sqlContext)
+        case "Q15" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q15, "Q15",
+             pw, sqlContext)
+        case "Q16" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q16, "Q16",
+             pw, sqlContext)
+        case "Q17" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q17, "Q17",
+             pw, sqlContext)
+        case "Q18" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q18, "Q18",
+             pw, sqlContext)
+        case "Q19" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q19, "Q19",
+             pw, sqlContext)
+        case "Q20" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q20, "Q20",
+             pw, sqlContext)
+        case "Q21" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q21, "Q21",
+             pw, sqlContext)
+        case "Q22" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q22, "Q22",
+             pw, sqlContext)
+        case "Q23" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q23, "Q23",
+             pw, sqlContext)
+        case "Q24" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q24, "Q24",
+             pw, sqlContext)
+        case "Q25" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q25, "Q25",
+             pw, sqlContext)
+        case "Q26" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q26, "Q26",
+             pw, sqlContext)
+        case "Q27" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q27, "Q27",
+             pw, sqlContext)
+        case "Q28" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q28, "Q28",
+             pw, sqlContext)
+        case "Q29" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q29, "Q29",
+             pw, sqlContext)
+        case "Q30" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q30, "Q30",
+             pw, sqlContext)
+        case "Q31" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q31, "Q31",
+             pw, sqlContext)
+        case "Q32" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q32, "Q32",
+             pw, sqlContext)
+        case "Q33" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q33, "Q33",
+             pw, sqlContext)
+        case "Q34" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q34, "Q34",
+             pw, sqlContext)
+        case "Q35" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q35, "Q35",
+             pw, sqlContext)
+        case "Q36" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q36, "Q36",
+             pw, sqlContext)
+        case "Q37" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q37, "Q37",
+             pw, sqlContext)
+        case "Q38" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q38, "Q38",
+             pw, sqlContext)
+        case "Q39" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q39, "Q39",
+             pw, sqlContext)
+        case "Q40" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q40, "Q40",
+             pw, sqlContext)
+        case "Q41" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q41, "Q41",
+             pw, sqlContext)
+        case "Q42" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q42, "Q42",
+             pw, sqlContext)
+        case "Q43" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q43, "Q43",
+             pw, sqlContext)
+        case "Q44" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q44, "Q44",
+             pw, sqlContext) // LeftSemiJoinHash
+        case "Q45" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q45, "Q45",
+             pw, sqlContext)
+        case "Q46" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q46, "Q46",
+             pw, sqlContext)
+        case "Q47" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q47, "Q47",
+             pw, sqlContext)
+        case "Q48" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q48, "Q48",
+             pw, sqlContext)
+        case "Q49" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q49, "Q49",
+             pw, sqlContext)
+        case "Q50" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q50, "Q50",
+             pw, sqlContext)
+        case "Q51" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q51, "Q51",
+             pw, sqlContext)
+        case "Q52" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q52, "Q52",
+             pw, sqlContext)
+        case "Q53" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q53, "Q53",
+             pw, sqlContext)
+        case "Q54" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q54, "Q54",
+             pw, sqlContext)
+        case "Q55" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q55, "Q55",
+             pw, sqlContext)
+        case "Q56" =>
+          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q56, "Q56",
+             pw, sqlContext)
+        case "Q57" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q57, "Q57",
+             pw, sqlContext)
+        case "Q58" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q58, "Q58",
+             pw, sqlContext)
+        case "Q59" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q59, "Q59",
+              pw, sqlContext)
+        case "Q60" =>
+          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q60, "Q60",
+             pw, sqlContext)
         // scalastyle:off println
-        case _ => println("OK")
+        case _ => queryExecuted = false; println(s"Did not execute query ${q._1}")
       }
+      if (queryExecuted)
+        pw.println(s"Execution completed for query ${q._1}")
+      if (hasValidationFailed)
+        failedQueries = SnappyTestUtils.addToFailedQueryList(failedQueries, q._1)
     }
+    failedQueries
   }
 
   def validateSelectiveQueriesFullResultSet(snc: SnappyContext, tableType: String, pw:
@@ -415,127 +336,127 @@ object NWTestUtil {
     for (q <- NWQueries.queries) {
       q._1 match {
         // case "Q1" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q1, "Q1",
-        // tableType, pw, sqlContext)
+        //  pw, sqlContext)
         // case "Q2" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q2, "Q2",
-        // tableType, pw, sqlContext)
+        //  pw, sqlContext)
         // case "Q3" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q3, "Q3",
-        // tableType, pw, sqlContext)
+        //  pw, sqlContext)
         // case "Q4" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q4, "Q4",
-        // tableType, pw, sqlContext)
+        //  pw, sqlContext)
         // case "Q5" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q5, "Q5",
-        // tableType, pw, sqlContext)
+        //  pw, sqlContext)
         case "Q6" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q6, "Q6",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q7" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q7, "Q7",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         // case "Q8" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q8, "Q8",
-        // tableType, pw, sqlContext)
+        //  pw, sqlContext)
         case "Q9" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q9, "Q9",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         // case "Q10" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q10, "Q10",
-        // tableType, pw, sqlContext)
+        //  pw, sqlContext)
         case "Q11" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q11, "Q11",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q12" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q12, "Q12",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q13" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q13, "Q13",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q14" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q14, "Q14",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q15" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q15, "Q15",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q16" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q16, "Q16",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q17" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q17, "Q17",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q18" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q18, "Q18",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q19" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q19, "Q19",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q20" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q20, "Q20",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q21" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q21, "Q21",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q22" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q22, "Q22",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         // case "Q23" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q23, "Q23",
-        // tableType, pw, sqlContext)
+        //  pw, sqlContext)
         case "Q24" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q24, "Q24",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q25" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q25, "Q25",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q26" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q26, "Q26",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q27" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q27, "Q27",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q28" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q28, "Q28",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         // case "Q29" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q29, "Q29",
-        // tableType, pw, sqlContext)
+        //  pw, sqlContext)
         case "Q30" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q30, "Q30",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q31" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q31, "Q31",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q32" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q32, "Q32",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q33" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q33, "Q33",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q34" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q34, "Q34",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q35" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q35, "Q35",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q36" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q36, "Q36",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q37" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q37, "Q37",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q38" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q38, "Q38",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q39" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q39, "Q39",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q40" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q40, "Q40",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q41" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q41, "Q41",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q42" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q42, "Q42",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q43" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q43, "Q43",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q44" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q44, "Q44",
-          tableType, pw, sqlContext) // LeftSemiJoinHash
+           pw, sqlContext) // LeftSemiJoinHash
         case "Q45" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q45, "Q45",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         /* case "Q46" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q46, "Q46",
-        tableType, pw, sqlContext)
+         pw, sqlContext)
         case "Q47" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q47, "Q47",
-        tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q48" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q48, "Q48",
-        tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q49" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q49, "Q49",
-        tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q50" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q50, "Q50",
-        tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q51" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q51, "Q51",
-        tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q52" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q52, "Q52",
-        tableType, pw, sqlContext)
+          pw, sqlContext)
         case "Q53" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q53, "Q53",
-        tableType, pw, sqlContext) */
+          pw, sqlContext) */
         case "Q54" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q54, "Q54",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q55" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q55, "Q55",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q56" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q56, "Q56",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q57" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q57, "Q57",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q58" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q58, "Q58",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         case "Q59" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q59, "Q59",
-          tableType, pw, sqlContext)
+           pw, sqlContext)
         // case "Q60" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q60,"Q60",
-        // tableType, pw, sqlContext)
+        //  pw, sqlContext)
         // scalastyle:off println
-        case _ => println("OK")
+        case _ => println(s"Did not execute query ${q._1}")
         // scalastyle:on println
       }
     }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
index 2ec64bb2ff..d2cec55054 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
@@ -40,6 +40,7 @@ object ValidateNWQueriesApp {
     val fullResultSetValidation: Boolean = args(2).toBoolean
     val numRowsValidation: Boolean = args(4).toBoolean
     val isSmokeRun: Boolean = args(3).toBoolean
+    def getCurrentDirectory = new java.io.File(".").getCanonicalPath
     val threadID = Thread.currentThread().getId
     val outputFile = "ValidateNWQueriesApp_thread_" + threadID + "_" + System.currentTimeMillis +
         ".out"
@@ -66,7 +67,15 @@ object ValidateNWQueriesApp {
         NWTestUtil.validateSelectiveQueriesFullResultSet(snc, tableType, pw, sqlContext)
       }
       else {
-        NWTestUtil.validateQueriesFullResultSet(snc, tableType, pw, sqlContext)
+        val failedQueries = NWTestUtil.validateQueriesFullResultSet(snc, tableType, pw, sqlContext)
+        if (!failedQueries.isEmpty) {
+          println(s"Validation failed for ${tableType} for queries ${failedQueries}. See " +
+              s"${getCurrentDirectory}/${outputFile}")
+          pw.println(s"Validation failed for ${tableType} for queries ${failedQueries}. ")
+          pw.close()
+          throw new Exception(s"Validation task failed for ${tableType}. See " +
+              s"${getCurrentDirectory}/${outputFile}")
+        }
       }
       pw.println(s"validateQueriesFullResultSet ${tableType} tables Queries Test completed  " +
           s"successfully at : " + System.currentTimeMillis)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
index b28700d679..0d21dae881 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
@@ -18,6 +18,7 @@ package io.snappydata.hydra.northwind
 
 import java.io.{File, FileOutputStream, PrintWriter}
 
+import util.TestException
 import com.typesafe.config.Config
 import io.snappydata.hydra.northwind
 import org.apache.spark.SparkContext
@@ -66,7 +67,13 @@ class ValidateNWQueriesJob extends SnappySQLJob {
           NWTestUtil.validateSelectiveQueriesFullResultSet(snc, tableType, pw, sqlContext)
         }
         else {
-          NWTestUtil.validateQueriesFullResultSet(snc, tableType, pw, sqlContext)
+          val failedQueries = NWTestUtil.validateQueriesFullResultSet(snc, tableType, pw, sqlContext)
+          if(!failedQueries.isEmpty) {
+            println(s"Validation failed for ${tableType} for queries ${failedQueries}. See ${getCurrentDirectory}/${outputFile}")
+            pw.println(s"Validation failed for ${tableType} for queries ${failedQueries}. ")
+            pw.close()
+            throw new TestException(s"Validation task failed for ${tableType}. See ${getCurrentDirectory}/${outputFile}")
+          }
         }
         pw.println(s"validateQueriesFullResultSet ${tableType} tables Queries Test completed  " +
             s"successfully at : " + System.currentTimeMillis)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/snapshotIsolation/SnapshotIsolationTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/snapshotIsolation/SnapshotIsolationTestUtils.scala
index c1b69e06d3..d7f070cd2d 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/snapshotIsolation/SnapshotIsolationTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/snapshotIsolation/SnapshotIsolationTestUtils.scala
@@ -20,24 +20,23 @@ package io.snappydata.hydra.snapshotIsolation
 import java.io.{File, PrintWriter}
 import java.util
 
-import io.snappydata.hydra.TestUtil
-
 import org.apache.spark.sql.{Row, DataFrame, SnappyContext}
 
 class SnapshotIsolationTestUtils {
 
-  def executeQueries(snc:SnappyContext,pw:PrintWriter):Any = {
-    assertQuery(SnapshotIsolationQueries.Q1,"Q1",snc,pw)
+  def executeQueries(snc: SnappyContext, pw: PrintWriter): Any = {
+    assertQuery(SnapshotIsolationQueries.Q1, "Q1", snc, pw)
   }
 
-  def assertQuery(sqlString: String,queryNum: String,snc: SnappyContext,pw: PrintWriter): Any ={
-    val time =  System.currentTimeMillis()
+  def assertQuery(sqlString: String, queryNum: String, snc: SnappyContext, pw: PrintWriter): Any = {
+    val time = System.currentTimeMillis()
     var snappyDF = snc.sql(sqlString)
     try {
-      verifyDuplicateRows(snappyDF,pw)
-      //TestUtil.compareFiles(snappyFile, newDFFile, pw, false)
+      verifyDuplicateRows(snappyDF, pw)
+      // TestUtil.compareFiles(snappyFile, newDFFile, pw, false)
     } catch {
       case ex: Exception => {
+        // scalastyle:off println
         pw.println(s"Verification failed for ${queryNum} with following exception:\n")
         ex.printStackTrace(pw)
       }
@@ -50,6 +49,7 @@ class SnapshotIsolationTestUtils {
     val dupList: util.List[Row] = new util.ArrayList[Row]()
     val rowList = df.collectAsList()
     val numRows = rowList.size().asInstanceOf[Int]
+    // scalastyle:off println
     pw.println(s"Num rows in resultSet are ${numRows} and rows are:")
     for (i <- 0 until numRows) {
       val row = rowList.get(i)
@@ -69,6 +69,6 @@ class SnapshotIsolationTestUtils {
     }
   }
 
-  def verifyResults(snappyFile: File,pw: PrintWriter): Unit ={
+  def verifyResults(snappyFile: File, pw: PrintWriter): Unit = {
   }
 }

From 9e5fd71622aacfeef74395a59b98693e4b612a5d Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Wed, 12 Jul 2017 11:42:08 +0530
Subject: [PATCH 02/33] - Scala formating changes

---
 .../io/snappydata/hydra/SnappyTestUtils.scala | 13 ++++--
 .../hydra/ct/CreateAndLoadCTTablesJob.scala   | 42 +++++++++++--------
 2 files changed, 34 insertions(+), 21 deletions(-)

diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index dcb4594236..0bfa36d01a 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -202,18 +202,23 @@ object SnappyTestUtils {
         pw.println(s"Actual Result   : $actualLine")
       }
     }
+    // scalastyle:off println
     if (actualLineSet.hasNext || expectedLineSet.hasNext) {
       hasValidationFailed = true
       if (actualLineSet.hasNext) {
         pw.println(s"Following ${actualLineSet.size} rows are unexpected in Snappy:")
+        while (actualLineSet.hasNext) {
+          val txt: String = actualLineSet.next()
+          pw.println(s"$txt")
+        }
       }
-      while (actualLineSet.hasNext)
-        pw.println(actualLineSet.next())
       if (expectedLineSet.hasNext) {
         pw.println(s"Following ${expectedLineSet.size} rows are missing in Snappy:")
+        while (expectedLineSet.hasNext) {
+          val txt: String = expectedLineSet.next()
+          pw.println(s"$txt")
+        }
       }
-      while (expectedLineSet.hasNext)
-        pw.println(expectedLineSet.next())
     }
     hasValidationFailed
   }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesJob.scala
index 1507f03e33..0641c04349 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesJob.scala
@@ -29,6 +29,7 @@ class CreateAndLoadCTTablesJob extends SnappySQLJob {
   override def runSnappyJob(snSession: SnappySession, jobConfig: Config): Any = {
     val pw = new PrintWriter(new FileOutputStream(new File("CreateAndLoadCTTablesJob.out"), true));
     val tableType = jobConfig.getString("tableType")
+    // scalastyle:off println
     pw.println("In create and load tables Job")
     Try {
       val snc = snSession.sqlContext
@@ -36,29 +37,36 @@ class CreateAndLoadCTTablesJob extends SnappySQLJob {
       val dataFilesLocation = jobConfig.getString("dataFilesLocation")
       val redundancy = jobConfig.getString("redundancy")
       pw.println(s"Data files are at : ${dataFilesLocation}")
-      snc.setConf("dataFilesLocation",dataFilesLocation)
+      snc.setConf("dataFilesLocation", dataFilesLocation)
       CTQueries.snc = snc
       CTTestUtil.dropTables(snc)
       pw.println(s"Create and load for ${tableType} tables has started...")
       pw.flush()
       tableType match {
-        //replicated row tables
+        // replicated row tables
         case "Replicated" => CTTestUtil.createReplicatedRowTables(snc)
-        case "PersistentReplicated" => CTTestUtil.createPersistReplicatedRowTables(snc,jobConfig.getString("persistenceMode"))
-        //partitioned row tables
-        case "PartitionedRow" => CTTestUtil.createPartitionedRowTables(snc,redundancy)
-        case "PersistentPartitionRow" => CTTestUtil.createPersistPartitionedRowTables(snc,redundancy,jobConfig.getString("persistenceMode"))
-        case "ColocatedRow" => CTTestUtil.createColocatedRowTables(snc,redundancy)
-        case "EvictionRow"=> CTTestUtil.createRowTablesWithEviction(snc,redundancy)
-        case "PersistentColocatedRow" => CTTestUtil.createPersistColocatedTables(snc,redundancy,jobConfig.getString("persistenceMode"))
-        case "ColocatedWithEvictionRow" => CTTestUtil.createColocatedRowTablesWithEviction(snc,redundancy,jobConfig.getString("persistenceMode"))
-        //column tables
-        case "Column" => CTTestUtil.createColumnTables(snc,redundancy)
-        case "PersistentColumn" => CTTestUtil.createPersistColumnTables(snc,jobConfig.getString("persistenceMode"))
-        case "ColocatedColumn" => CTTestUtil.createColocatedColumnTables(snc,redundancy)
-        case "EvictionColumn" => CTTestUtil.createColumnTablesWithEviction(snc,redundancy)
-        case "PersistentColocatedColumn" => CTTestUtil.createPersistColocatedColumnTables(snc,redundancy,jobConfig.getString("persistenceMode"))
-        case "ColocatedWithEvictionColumn" => CTTestUtil.createColocatedColumnTablesWithEviction(snc,redundancy)
+        case "PersistentReplicated" =>
+          CTTestUtil.createPersistReplicatedRowTables(snc, jobConfig.getString("persistenceMode"))
+        // partitioned row tables
+        case "PartitionedRow" => CTTestUtil.createPartitionedRowTables(snc, redundancy)
+        case "PersistentPartitionRow" => CTTestUtil.createPersistPartitionedRowTables(snc,
+          redundancy, jobConfig.getString("persistenceMode"))
+        case "ColocatedRow" => CTTestUtil.createColocatedRowTables(snc, redundancy)
+        case "EvictionRow" => CTTestUtil.createRowTablesWithEviction(snc, redundancy)
+        case "PersistentColocatedRow" => CTTestUtil.createPersistColocatedTables(snc, redundancy,
+          jobConfig.getString("persistenceMode"))
+        case "ColocatedWithEvictionRow" => CTTestUtil.createColocatedRowTablesWithEviction(snc,
+          redundancy, jobConfig.getString("persistenceMode"))
+        // column tables
+        case "Column" => CTTestUtil.createColumnTables(snc, redundancy)
+        case "PersistentColumn" =>
+          CTTestUtil.createPersistColumnTables(snc, jobConfig.getString("persistenceMode"))
+        case "ColocatedColumn" => CTTestUtil.createColocatedColumnTables(snc, redundancy)
+        case "EvictionColumn" => CTTestUtil.createColumnTablesWithEviction(snc, redundancy)
+        case "PersistentColocatedColumn" => CTTestUtil.createPersistColocatedColumnTables(snc,
+          redundancy, jobConfig.getString("persistenceMode"))
+        case "ColocatedWithEvictionColumn" =>
+          CTTestUtil.createColocatedColumnTablesWithEviction(snc, redundancy)
         case _ =>
           pw.println(s"Did not find any match for ${tableType} to create tables")
           pw.close()

From 6d028d1f300ec4fff28238cb7d5e16c175b460ee Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Mon, 14 Aug 2017 15:06:31 +0530
Subject: [PATCH 03/33] - Test changes in ct.bt

---
 .../src/test/java/io/snappydata/hydra/ct/ct.bt   | 12 ++++++------
 .../src/test/java/io/snappydata/hydra/ct/ctHA.bt |  6 +++---
 .../test/java/io/snappydata/hydra/ct/ctMisc.bt   | 16 ++++++++--------
 .../java/io/snappydata/hydra/ct/ctSplitMode.conf |  2 +-
 .../hydra/ct/ctSplitModeCreateWithScript.conf    |  3 ++-
 .../hydra/ct/ctSplitModeCreateWithSparkApp.conf  |  2 +-
 6 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ct.bt b/dtests/src/test/java/io/snappydata/hydra/ct/ct.bt
index 39f84437f3..746b8b423b 100644
--- a/dtests/src/test/java/io/snappydata/hydra/ct/ct.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ct.bt
@@ -27,7 +27,7 @@ io/snappydata/hydra/ct/ctSplitMode.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="PartitionedRow"
   numRowsValidation=true fullResultSetValidation=true
@@ -48,7 +48,7 @@ io/snappydata/hydra/ct/ctSplitMode.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="ColocatedRow"
   numRowsValidation=true fullResultSetValidation=true
@@ -58,7 +58,7 @@ io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="EvictionRow"
   scriptName="ct_create_and_load_eviction_row_table.sql"
@@ -92,7 +92,7 @@ io/snappydata/hydra/ct/ctSplitMode.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="Column"
   numRowsValidation=true fullResultSetValidation=true
@@ -112,7 +112,7 @@ io/snappydata/hydra/ct/ctSplitMode.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="ColocatedColumn"
   numRowsValidation=true fullResultSetValidation=true
@@ -132,7 +132,7 @@ io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="Column"
   numRowsValidation=true fullResultSetValidation=true
diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ctHA.bt b/dtests/src/test/java/io/snappydata/hydra/ct/ctHA.bt
index 1b79ae7cd1..94ebdecd38 100644
--- a/dtests/src/test/java/io/snappydata/hydra/ct/ctHA.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ctHA.bt
@@ -26,7 +26,7 @@ io/snappydata/hydra/ct/ctSplitModeCreateWithSparkAppLeadHA.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="ColocatedRow"
   numVMsToStop=1
@@ -37,7 +37,7 @@ io/snappydata/hydra/ct/ctSplitModeCreateWithScriptLeadHA.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="ColocatedColumn"
   scriptName="ct_create_and_load_colocated_column_table.sql"
@@ -83,7 +83,7 @@ io/snappydata/hydra/ct/ctSplitModeCreateWithSparkAppServerHA.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=1  persistenceMode="null"
+  redundantCopies=1  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="Column"
   numVMsToStop=1
diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ctMisc.bt b/dtests/src/test/java/io/snappydata/hydra/ct/ctMisc.bt
index 17355ac3dd..75cb9fa263 100644
--- a/dtests/src/test/java/io/snappydata/hydra/ct/ctMisc.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ctMisc.bt
@@ -3,7 +3,7 @@ io/snappydata/hydra/ct/ctSplitMode.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,2  persistenceMode="null"
+  redundantCopies=0,2  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="PartitionedRow"
   numRowsValidation=true  fullResultSetValidation=true
@@ -35,7 +35,7 @@ io/snappydata/hydra/ct/ctSplitMode.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0  persistenceMode="null"
+  redundantCopies=0  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="ColocatedRow"
   numRowsValidation=true fullResultSetValidation=true
@@ -45,7 +45,7 @@ io/snappydata/hydra/ct/ctSplitModeCreateWithSparkAppLeadHA.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0  persistenceMode="null"
+  redundantCopies=0  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="ColocatedRow"
   numVMsToStop=1
@@ -56,7 +56,7 @@ io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0  persistenceMode="null"
+  redundantCopies=0  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="EvictionRow"
   scriptName="ct_create_and_load_eviction_row_table.sql"
@@ -89,7 +89,7 @@ io/snappydata/hydra/ct/ctSplitMode.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0  persistenceMode="null"
+  redundantCopies=0  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="Column"
   numRowsValidation=true fullResultSetValidation=true
@@ -109,7 +109,7 @@ io/snappydata/hydra/ct/ctSplitMode.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,2  persistenceMode="null"
+  redundantCopies=0,2  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="ColocatedColumn"
   numRowsValidation=true fullResultSetValidation=true
@@ -129,7 +129,7 @@ io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=0,2  persistenceMode="null"
+  redundantCopies=2  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="Column"
   numRowsValidation=true fullResultSetValidation=true
@@ -162,7 +162,7 @@ io/snappydata/hydra/ct/ctSplitModeCreateWithSparkAppServerHA.conf
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=2  locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
-  redundantCopies=2  persistenceMode="null"
+  redundantCopies=2  persistenceMode="sync"
   dataFilesLocation="/export/shared/QA_DATA/ct"
   tableType="Column"
   numVMsToStop=1
diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitMode.conf b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitMode.conf
index b577622ba5..da3b31e56e 100644
--- a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitMode.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitMode.conf
@@ -33,7 +33,7 @@ TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = H
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
             io.snappydata.hydra.cluster.SnappyPrms-executorMemory = 4g
             maxThreads = 1
-            threadGroups = snappyThreads
+            threadGroups = snappyStoreThreads
             ;
 
 INCLUDE $JTESTS/io/snappydata/hydra/northwind/stopDualModeCluster.conf;
diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
index dc1fe7f610..1eeacbd85d 100644
--- a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf
@@ -5,6 +5,7 @@ hydra.Prms-testDescription = "This test starts the snappy cluster and spark clus
 
 INCLUDE $JTESTS/io/snappydata/hydra/northwind/startDualModeCluster.conf;
 
+
 //Create and load table using sql script
 INITTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSQLScripts
             io.snappydata.hydra.cluster.SnappyPrms-sqlScriptNames = ${scriptName}
@@ -34,7 +35,7 @@ TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = H
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
             io.snappydata.hydra.cluster.SnappyPrms-executorMemory = 4g
             maxThreads = 1
-            threadGroups = snappyThreads
+            threadGroups = snappyStoreThreads
             ;
 
 INCLUDE $JTESTS/io/snappydata/hydra/northwind/stopDualModeCluster.conf;
diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf
index 77ae4ed3cd..ff7412e563 100644
--- a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithSparkApp.conf
@@ -33,7 +33,7 @@ TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = H
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
             io.snappydata.hydra.cluster.SnappyPrms-executorMemory = 4g
             maxThreads = 1
-            threadGroups = snappyThreads
+            threadGroups = snappyStoreThreads
             ;
 
 INCLUDE $JTESTS/io/snappydata/hydra/northwind/stopDualModeCluster.conf;

From 14e6275aac3a4de7ec8e94b78f1ca0580ac83ca6 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Mon, 5 Feb 2018 15:14:55 +0530
Subject: [PATCH 04/33] - Code formatting changes in northWind.bt

---
 .../io/snappydata/hydra/SnappyTestUtils.scala |  50 +-
 .../io/snappydata/hydra/ct/CTTestUtil.scala   | 151 ++---
 .../hydra/ct/ValidateCTQueriesJob.scala       |  21 +-
 .../hydra/northwind/NWTestJob.scala           |  14 +-
 .../hydra/northwind/NWTestSparkApp.scala      |  11 +-
 .../hydra/northwind/NWTestUtil.scala          | 533 +++++-------------
 .../northwind/ValidateNWQueriesApp.scala      |   4 +-
 .../northwind/ValidateNWQueriesJob.scala      |  10 +-
 8 files changed, 246 insertions(+), 548 deletions(-)

diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index b34eff9a96..279770071d 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -34,33 +34,47 @@ object SnappyTestUtils {
   Executes the join query, matches the result with expected result, returns false if the query
   validation has failed.
   */
-  def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
-       pw: PrintWriter): Any = {
+  def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
+  PrintWriter, sqlContext: SQLContext): Boolean = {
+    var validationFailed = false
     snc.sql("set spark.sql.crossJoin.enabled = true")
-    assertQuery(snc, sqlString, numRows, queryNum, pw )
+    validationFailed = assertQuery(snc, sqlString, numRows, queryNum, pw, sqlContext)
+    return validationFailed
   }
 
-
   /*
- Executes the query, matches the result with expected result, returns false if the query
- validation has failed.
- */
-  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
-      pw: PrintWriter): Any = {
+   Executes the query, matches the result with expected result, returns false if the query
+   validation has failed.
+   */
+  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
+  PrintWriter, sqlContext: SQLContext): Boolean = {
+    var validationFailed = false
     val df = snc.sql(sqlString)
-    val count = df.count()
+    val count = df.count
     // scalastyle:off println
     println(s"Query $queryNum")
     df.explain(true)
-    pw.println(s"Query ${queryNum} \n df.count is : ${count} \n Expected numRows : ${numRows} " +
-        s"\n Table Type : ${tableType}")
-    println(s"Query ${queryNum} \n df.count is : ${count} \n Expected numRows : ${numRows} \n " +
-        s"Table Type : ${tableType}")
-    // scalastyle:on println
-    assert(df.count() == numRows,
-      s"Mismatch got for query ${queryNum} : df.count -> ${count} but expected numRows " +
-          s"-> $numRows for query = $sqlString Table Type : $tableType")
+    if (SnappyTestUtils.numRowsValidation) {
+      pw.println(s"No. rows in resultset for query ${queryNum} is : ${count} for " +
+          s"${SnappyTestUtils.tableType} table")
+      if (df.count() != numRows) {
+        pw.println(s"Result mismatch for query ${queryNum} : found ${count} rows but expected " +
+            s" ${numRows} rows.")
+        validationFailed = true
+      }
+      pw.flush()
+    }
+    if (SnappyTestUtils.validateFullResultSet) {
+      validationFailed = assertQueryFullResultSet(snc, sqlString, queryNum,
+        pw, sqlContext)
+    }
+
+    if (validationFailed) {
+      pw.println(s"Failed Query : " + sqlString + "\n Table Type : " + SnappyTestUtils.tableType
+          + "\n")
+    }
     pw.flush()
+    return validationFailed
   }
 
   def assertJoinFullResultSet(snc: SnappyContext, sqlString: String, queryNum: String,
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/CTTestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/CTTestUtil.scala
index f0f42bf3a9..310de0795a 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/CTTestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/CTTestUtil.scala
@@ -27,14 +27,6 @@ object CTTestUtil {
 
   def getCurrentDirectory: String = new java.io.File(".").getCanonicalPath
 
-  def assertQuery(snc: SnappyContext, sqlString: String, queryNum: String, pw: PrintWriter):
-  Any = {
-    // scalastyle:off println
-    pw.println(s"Query execution for $queryNum")
-    val df = snc.sql(sqlString)
-    pw.println("Number of Rows for  : " + sqlString + " is :" + df.count())
-  }
-
   def createReplicatedRowTables(snc: SnappyContext): Unit = {
     snc.sql(CTQueries.orders_details_create_ddl)
     snc.sql(CTQueries.exec_details_create_ddl)
@@ -183,54 +175,54 @@ object CTTestUtil {
       var queryExecuted = true;
       var hasValidationFailed = false;
       q._1 match {
-        case "Q1" => hasValidationFailed = assertQuery(snc, CTQueries.query1, 1, "Q1",
-          pw, sqlContext)
-        case "Q2" => hasValidationFailed = assertQuery(snc, CTQueries.query2, 1, "Q2",
-          pw, sqlContext)
-        case "Q3" => hasValidationFailed = assertQuery(snc, CTQueries.query3, 1, "Q3",
-          pw, sqlContext)
-        case "Q4" => hasValidationFailed = assertQuery(snc, CTQueries.query4, 1, "Q4",
-          pw, sqlContext)
-        case "Q5" => hasValidationFailed = assertQuery(snc, CTQueries.query5, 1, "Q5",
-          pw, sqlContext)
-        case "Q6" => hasValidationFailed = assertQuery(snc, CTQueries.query6, 5, "Q6",
-          pw, sqlContext)
-        case "Q7" => hasValidationFailed = assertQuery(snc, CTQueries.query7, 5, "Q7",
-          pw, sqlContext)
-        case "Q8" => hasValidationFailed = assertQuery(snc, CTQueries.query8, 5, "Q8",
-          pw, sqlContext)
-        case "Q9" => hasValidationFailed = assertQuery(snc, CTQueries.query9, 1, "Q9",
-          pw, sqlContext)
-        case "Q10" => hasValidationFailed = assertQuery(snc, CTQueries.query10, 1, "Q10",
-          pw, sqlContext)
-        case "Q11" => hasValidationFailed = assertJoin(snc, CTQueries.query11, 2706, "Q11",
-          pw, sqlContext)
-        case "Q12" => hasValidationFailed = assertJoin(snc, CTQueries.query12, 150, "Q12",
-          pw, sqlContext)
-        case "Q13" => hasValidationFailed = assertQuery(snc, CTQueries.query13, 149, "Q13",
-          pw, sqlContext)
-        case "Q14" => hasValidationFailed = assertQuery(snc, CTQueries.query14, 149, "Q14",
-          pw, sqlContext)
-        case "Q15" => hasValidationFailed = assertJoin(snc, CTQueries.query15, 2620, "Q15",
-          pw, sqlContext)
-        case "Q16" => hasValidationFailed = assertJoin(snc, CTQueries.query16, 150, "Q16",
-          pw, sqlContext)
-        case "Q17" => hasValidationFailed = assertQuery(snc, CTQueries.query17, 2, "Q17",
-          pw, sqlContext)
-        case "Q18" => hasValidationFailed = assertQuery(snc, CTQueries.query18, 0, "Q18",
-          pw, sqlContext)
-        case "Q19" => hasValidationFailed = assertQuery(snc, CTQueries.query19, 47, "Q19",
-          pw, sqlContext)
-        case "Q20" => hasValidationFailed = assertQuery(snc, CTQueries.query20, 100, "Q20",
-          pw, sqlContext)
-        case "Q21" => hasValidationFailed = assertQuery(snc, CTQueries.query21, 2, "Q21",
-          pw, sqlContext)
-        case "Q22" => hasValidationFailed = assertJoin(snc, CTQueries.query22, 1, "Q22",
-          pw, sqlContext)
-        // case "Q23" => hasValidationFailed = assertJoin(snc, CTQueries.query23,0,"Q23",
-        //   pw,sqlContext)
-        case "Q24" => hasValidationFailed = assertQuery(snc, CTQueries.query24, 999, "Q24",
-          pw, sqlContext)
+        case "Q1" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query1, 1,
+          "Q1", pw, sqlContext)
+        case "Q2" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query2, 1,
+          "Q2", pw, sqlContext)
+        case "Q3" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query3, 1,
+          "Q3", pw, sqlContext)
+        case "Q4" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query4, 1,
+          "Q4", pw, sqlContext)
+        case "Q5" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query5, 1,
+          "Q5", pw, sqlContext)
+        case "Q6" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query6, 5,
+          "Q6", pw, sqlContext)
+        case "Q7" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query7, 5,
+          "Q7", pw, sqlContext)
+        case "Q8" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query8, 5,
+          "Q8", pw, sqlContext)
+        case "Q9" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query9, 1,
+          "Q9", pw, sqlContext)
+        case "Q10" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query10, 1,
+          "Q10", pw, sqlContext)
+        case "Q11" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, CTQueries.query11, 2706,
+          "Q11", pw, sqlContext)
+        case "Q12" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, CTQueries.query12, 150,
+          "Q12", pw, sqlContext)
+        case "Q13" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query13, 149,
+          "Q13", pw, sqlContext)
+        case "Q14" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query14, 149,
+          "Q14", pw, sqlContext)
+        case "Q15" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, CTQueries.query15, 2620,
+          "Q15", pw, sqlContext)
+        case "Q16" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, CTQueries.query16, 150,
+          "Q16", pw, sqlContext)
+        case "Q17" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query17, 2,
+          "Q17", pw, sqlContext)
+        case "Q18" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query18, 0,
+          "Q18", pw, sqlContext)
+        case "Q19" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query19, 47,
+          "Q19", pw, sqlContext)
+        case "Q20" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query20, 100,
+          "Q20", pw, sqlContext)
+        case "Q21" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query21, 2,
+          "Q21", pw, sqlContext)
+        case "Q22" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, CTQueries.query22, 1,
+          "Q22", pw, sqlContext)
+        // case "Q23" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, CTQueries.query23,0,
+        // "Q23", pw,sqlContext)
+        case "Q24" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, CTQueries.query24, 999,
+          "Q24", pw, sqlContext)
         case _ =>
           // scalastyle:off println
           pw.println(s"Query ${q._1} will not  be executed.")
@@ -250,51 +242,6 @@ object CTTestUtil {
     snc.sql("drop table if exists exec_details")
     snc.sql("drop table if exists orders_details")
   }
-
-  /*
-  Executes the join query, matches the result with expected result, returns false if the query
-  validation has failed.
-  */
-  def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
-  PrintWriter, sqlContext: SQLContext): Boolean = {
-    var validationFailed = false
-    snc.sql("set spark.sql.crossJoin.enabled = true")
-    validationFailed = assertQuery(snc, sqlString, numRows, queryNum, pw, sqlContext)
-    return validationFailed
-  }
-
-  /*
-   Executes the query, matches the result with expected result, returns false if the query
-   validation has failed.
-   */
-  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
-  PrintWriter, sqlContext: SQLContext): Boolean = {
-    var validationFailed = false
-    val df = snc.sql(sqlString)
-    val count = df.count
-    println(s"Query $queryNum")
-    df.explain(true)
-    if (SnappyTestUtils.numRowsValidation) {
-      pw.println(s"No. rows in resultset for query ${queryNum} is : ${count} for " +
-          s"${SnappyTestUtils.tableType} table")
-      if (df.count() != numRows) {
-        pw.println(s"Result mismatch for query ${queryNum} : found ${count} rows but expected " +
-            s" ${numRows} rows.")
-        validationFailed = true
-      }
-      pw.flush()
-    }
-    if (SnappyTestUtils.validateFullResultSet) {
-      validationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, sqlString, queryNum,
-        pw, sqlContext)
-    }
-
-    if (validationFailed) {
-      pw.println(s"Failed Query : " + sqlString + "\n Table Type : " + SnappyTestUtils.tableType
-          + "\n")
-    }
-    pw.flush()
-    return validationFailed
-  }
 }
 
+
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
index 23ac4380eb..b4b408bb4b 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
@@ -31,7 +31,8 @@ class ValidateCTQueriesJob extends SnappySQLJob {
   override def runSnappyJob(snSession: SnappySession, jobConfig: Config): Any = {
     def getCurrentDirectory = new java.io.File(".").getCanonicalPath
     val threadID = Thread.currentThread().getId
-    val outputFile = "ValidateCTQueriesJob_thread_" + threadID + "_" + System.currentTimeMillis + ".out"
+    val outputFile =
+      "ValidateCTQueriesJob_thread_" + threadID + "_" + System.currentTimeMillis + ".out"
     val pw = new PrintWriter(new FileOutputStream(new File(outputFile), true));
     val tableType = jobConfig.getString("tableType")
 
@@ -41,6 +42,7 @@ class ValidateCTQueriesJob extends SnappySQLJob {
       val dataFilesLocation = jobConfig.getString("dataFilesLocation")
       snc.setConf("dataFilesLocation", dataFilesLocation)
       CTQueries.snc = snc
+      // scalastyle:off println
       pw.println(s"Validation for $tableType tables started in snappy Job")
       val numRowsValidation: Boolean = jobConfig.getBoolean("numRowsValidation")
       val fullResultSetValidation: Boolean = jobConfig.getBoolean("fullResultSetValidation")
@@ -49,23 +51,28 @@ class ValidateCTQueriesJob extends SnappySQLJob {
       if(numRowsValidation){
 
       }
-      if (fullResultSetValidation)
+      if (fullResultSetValidation) {
         pw.println(s"Test will perform fullResultSetValidation")
-      else
+      }
+      else {
         pw.println(s"Test will not perform fullResultSetValidation")
+      }
       val startTime = System.currentTimeMillis
       val failedQueries = CTTestUtil.executeQueries(snc, tableType, pw, fullResultSetValidation,
-        sqlContext,numRowsValidation)
+        sqlContext, numRowsValidation)
       val endTime = System.currentTimeMillis
       val totalTime = (endTime - startTime) / 1000
       pw.println(s"Total time for execution is :: ${totalTime} seconds.")
       if(!failedQueries.isEmpty) {
-        println(s"Validation failed for ${tableType} for queries ${failedQueries}. See ${getCurrentDirectory}/${outputFile}")
+        println(s"Validation failed for ${tableType} for queries ${failedQueries}. " +
+            s"See ${getCurrentDirectory}/${outputFile}")
         pw.println(s"Validation failed for ${tableType} for queries ${failedQueries}. ")
         pw.close()
-        throw new TestException(s"Validation task failed for ${tableType}. See ${getCurrentDirectory}/${outputFile}")
+        throw new TestException(s"Validation task failed for ${tableType}. " +
+            s"See ${getCurrentDirectory}/${outputFile}")
       }
-      println(s"Validation for $tableType tables completed. See ${getCurrentDirectory}/${outputFile}")
+      println(s"Validation for $tableType tables completed. " +
+          s"See ${getCurrentDirectory}/${outputFile}")
       pw.println(s"Validation for $tableType tables completed.")
       pw.close()
     } match {
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestJob.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestJob.scala
index 4ab289de0f..45f6cf9e5a 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestJob.scala
@@ -19,10 +19,12 @@ package io.snappydata.hydra.northwind
 import java.io.{File, FileOutputStream, PrintWriter}
 
 import com.typesafe.config.Config
-import org.apache.spark.sql._
 
+import org.apache.spark.sql._
 import scala.util.{Failure, Success, Try}
 
+import org.apache.spark.SparkContext
+
 object NWTestJob extends SnappySQLJob {
   var regions, categories, shippers, employees, customers, orders, order_details, products,
   suppliers, territories, employee_territories: DataFrame = null
@@ -33,6 +35,8 @@ object NWTestJob extends SnappySQLJob {
     val pw = new PrintWriter(new FileOutputStream(new File("NWTestSnappyJob.out"), true));
     Try {
       val snc = snappySession.sqlContext
+      val sc = SparkContext.getOrCreate()
+      val sqlContext = SQLContext.getOrCreate(sc)
       snc.sql("set spark.sql.shuffle.partitions=6")
       val dataLocation = jobConfig.getString("dataFilesLocation")
       snc.setConf("dataFilesLocation", dataLocation)
@@ -75,21 +79,21 @@ object NWTestJob extends SnappySQLJob {
       // scalastyle:off println
       println("Test replicated row tables queries started")
       NWTestUtil.createAndLoadReplicatedTables(snc)
-      NWTestUtil.validateQueries(snc, "Replicated Row Table", pw)
+      NWTestUtil.validateQueries(snc, "Replicated Row Table", pw, sqlContext)
       println("Test replicated row tables queries completed successfully")
       NWTestUtil.dropTables(snc)
       println("Test partitioned row tables queries started")
       NWTestUtil.createAndLoadPartitionedTables(snc)
-      NWTestUtil.validateQueries(snc, "Partitioned Row Table", pw)
+      NWTestUtil.validateQueries(snc, "Partitioned Row Table", pw, sqlContext)
       println("Test partitioned row tables queries completed successfully")
       NWTestUtil.dropTables(snc)
       println("Test column tables queries started")
       NWTestUtil.createAndLoadColumnTables(snc)
-      NWTestUtil.validateQueries(snc, "Column Table", pw)
+      NWTestUtil.validateQueries(snc, "Column Table", pw, sqlContext)
       println("Test column tables queries completed successfully")
       NWTestUtil.dropTables(snc)
       NWTestUtil.createAndLoadColocatedTables(snc)
-      NWTestUtil.validateQueries(snc, "Colocated Table", pw)
+      NWTestUtil.validateQueries(snc, "Colocated Table", pw, sqlContext)
       pw.close()
     } match {
       case Success(v) => pw.close()
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala
index 29bba7f1eb..305dc8b0fd 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala
@@ -18,7 +18,7 @@ package io.snappydata.hydra.northwind
 
 import java.io.{File, FileOutputStream, PrintWriter}
 
-import org.apache.spark.sql.SnappyContext
+import org.apache.spark.sql.{SQLContext, SnappyContext}
 import org.apache.spark.{SparkConf, SparkContext}
 
 
@@ -30,6 +30,7 @@ object NWTestSparkApp {
         setAppName("NWTestSpark Application").
         set("snappydata.connection", connectionURL)
     val sc = SparkContext.getOrCreate(conf)
+    val sqlContext = SQLContext.getOrCreate(sc)
     val snc = SnappyContext(sc)
     val dataFilesLocation = args(0)
     // scalastyle:off println
@@ -42,21 +43,21 @@ object NWTestSparkApp {
     NWTestUtil.dropTables(snc)
     println("Test replicated row tables queries started")
     NWTestUtil.createAndLoadReplicatedTables(snc)
-    NWTestUtil.validateQueries(snc, "Replicated Row Table", pw)
+    NWTestUtil.validateQueries(snc, "Replicated Row Table", pw, sqlContext)
     println("Test replicated row tables queries completed successfully")
     NWTestUtil.dropTables(snc)
     println("Test partitioned row tables queries started")
     NWTestUtil.createAndLoadPartitionedTables(snc)
-    NWTestUtil.validateQueries(snc, "Partitioned Row Table", pw)
+    NWTestUtil.validateQueries(snc, "Partitioned Row Table", pw, sqlContext)
     println("Test partitioned row tables queries completed successfully")
     NWTestUtil.dropTables(snc)
     println("Test column tables queries started")
     NWTestUtil.createAndLoadColumnTables(snc)
-    NWTestUtil.validateQueries(snc, "Column Table", pw)
+    NWTestUtil.validateQueries(snc, "Column Table", pw, sqlContext)
     println("Test column tables queries completed successfully")
     NWTestUtil.dropTables(snc)
     NWTestUtil.createAndLoadColocatedTables(snc)
-    NWTestUtil.validateQueries(snc, "Colocated Table", pw)
+    NWTestUtil.validateQueries(snc, "Colocated Table", pw, sqlContext)
     pw.close()
   }
 
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
index 44c83b260f..73868621b3 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
@@ -18,164 +18,13 @@ package io.snappydata.hydra.northwind
 
 import java.io.{File, PrintWriter}
 
-import scala.io.Source
 
 import io.snappydata.hydra.SnappyTestUtils
 
 import org.apache.spark.sql._
-import org.apache.spark.sql.catalyst.encoders.RowEncoder
 
 object NWTestUtil {
 
-  def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
-                 tableType: String, pw: PrintWriter): Any = {
-    snc.sql("set spark.sql.crossJoin.enabled = true")
-    val df = snc.sql(sqlString)
-    // scalastyle:off println
-    println(s"Query $queryNum")
-    df.explain(true)
-    pw.println(s"Query ${queryNum} \n df.count for join query is : ${df.count} \n Expected " +
-        s"numRows : ${numRows} \n Table Type : ${tableType}")
-    println(s"Query ${queryNum} \n df.count for join query is : ${df.count} \n Expected numRows :" +
-        s" ${numRows} \n Table Type : ${tableType}")
-    // scalastyle:on println
-    assert(df.count() == numRows,
-      s"Mismatch got for query ${queryNum} : df.count -> ${df.count()} but expected numRows " +
-          s"-> $numRows " +
-          s" for query = $sqlString Table Type : $tableType\n" +
-          s"plan : ${df.explain(true)} ")
-    pw.flush()
-  }
-
-  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
-                  tableType: String, pw: PrintWriter): Any = {
-    val df = snc.sql(sqlString)
-    // scalastyle:off println
-    println(s"Query $queryNum")
-    df.explain(true)
-    pw.println(s"Query ${queryNum} \n df.count is : ${df.count} \n Expected numRows : ${numRows} " +
-        s"\n Table Type : ${tableType}")
-    println(s"Query ${queryNum} \n df.count is : ${df.count} \n Expected numRows : ${numRows} \n " +
-        s"Table Type : ${tableType}")
-    // scalastyle:on println
-    assert(df.count() == numRows,
-      s"Mismatch got for query ${queryNum} : df.count -> ${df.count()} but expected numRows " +
-          s"-> $numRows for query = $sqlString Table Type : $tableType")
-    pw.flush()
-  }
-
-  def assertJoinFullResultSet(snc: SnappyContext, sqlString: String, numRows: Int, queryNum:
-  String, tableType: String, pw: PrintWriter, sqlContext: SQLContext): Any = {
-    snc.sql("set spark.sql.crossJoin.enabled = true")
-    sqlContext.sql("set spark.sql.crossJoin.enabled = true")
-    assertQueryFullResultSet(snc, sqlString, numRows, queryNum, tableType, pw, sqlContext)
-  }
-
-  def dataTypeConverter(row: Row): Row = {
-    val md = row.toSeq.map {
-      // case d: Double => "%18.1f".format(d).trim().toDouble
-      case d: Double => math.floor(d * 10.0 + 0.5) / 10.0
-      case de: BigDecimal => {
-        de.setScale(2, BigDecimal.RoundingMode.HALF_UP)
-      }
-      case i: Integer => {
-        i
-      }
-      case v => v
-    }
-    Row.fromSeq(md)
-  }
-
-  def writeToFile(df: DataFrame, dest: String, snc: SnappyContext): Unit = {
-    import snc.implicits._
-    df.map(dataTypeConverter)(RowEncoder(df.schema))
-        .map(row => {
-          val sb = new StringBuilder
-          row.toSeq.foreach {
-            case e if e == null =>
-              sb.append("NULL").append(",")
-            case e =>
-              sb.append(e.toString).append(",")
-          }
-          sb.toString()
-        }).write.format("org.apache.spark.sql.execution.datasources.csv.CSVFileFormat").option(
-      "header", false).save(dest)
-  }
-
-  protected def getTempDir(dirName: String): String = {
-    val log: File = new File(".")
-    var dest: String = null
-    val dirString = log.getCanonicalPath;
-    if (dirName.equals("sparkQueryFiles")) {
-      val logDir = log.listFiles.filter(_.getName.equals("snappyleader.log"))
-      if (!logDir.isEmpty) {
-        val leaderLogFile: File = logDir.iterator.next()
-        if (leaderLogFile.exists()) dest = dirString + File.separator + ".." + File.separator + "" +
-            ".." + File.separator + dirName
-      }
-      else dest = dirString + File.separator + ".." + File.separator + dirName
-    }
-    else dest = log.getCanonicalPath + File.separator + dirName
-    val tempDir: File = new File(dest)
-    if (!tempDir.exists) tempDir.mkdir()
-    return tempDir.getAbsolutePath
-  }
-
-  def assertQueryFullResultSet(snc: SnappyContext, sqlString: String, numRows: Int, queryNum:
-  String, tableType: String, pw: PrintWriter, sqlContext: SQLContext): Any = {
-    // scalastyle:off println
-    var snappyDF = snc.sql(sqlString)
-    var sparkDF = sqlContext.sql(sqlString);
-    val snappyQueryFileName = s"Snappy_${queryNum}.out"
-    val sparkQueryFileName = s"Spark_${queryNum}.out"
-    val snappyDest: String = getTempDir("snappyQueryFiles") + File.separator + snappyQueryFileName
-    val sparkDest: String = getTempDir("sparkQueryFiles") + File.separator + sparkQueryFileName
-    val sparkFile: File = new java.io.File(sparkDest)
-    val snappyFile = new java.io.File(snappyDest)
-    val col1 = sparkDF.schema.fieldNames(0)
-    val col = sparkDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
-    if (snappyFile.listFiles() == null) {
-      snappyDF = snappyDF.coalesce(1).orderBy(col1, col: _*)
-      writeToFile(snappyDF, snappyDest, snc)
-      pw.println(s"${queryNum} Result Collected in file $snappyDest")
-    }
-    if (sparkFile.listFiles() == null) {
-      sparkDF = sparkDF.coalesce(1).orderBy(col1, col: _*)
-      writeToFile(sparkDF, sparkDest, snc)
-      pw.println(s"${queryNum} Result Collected in file $sparkDest")
-    }
-    val expectedFile = sparkFile.listFiles.filter(_.getName.endsWith(".csv"))
-    val actualFile = snappyFile.listFiles.filter(_.getName.endsWith(".csv"))
-    val expectedLineSet = Source.fromFile(expectedFile.iterator.next()).getLines()
-    val actualLineSet = Source.fromFile(actualFile.iterator.next()).getLines
-    var numLines = 0
-    while (expectedLineSet.hasNext && actualLineSet.hasNext) {
-      val expectedLine = expectedLineSet.next()
-      val actualLine = actualLineSet.next()
-      if (!actualLine.equals(expectedLine)) {
-        pw.println(s"\n** For ${queryNum} result mismatch observed**")
-        pw.println(s"\nExpected Result:\n $expectedLine")
-        pw.println(s"\nActual Result:\n $actualLine")
-        pw.println(s"\nQuery =" + sqlString + " Table Type : " + tableType)
-        /* assert(assertion = false, s"\n** For $queryNum result mismatch observed** \n" +
-            s"Expected Result \n: $expectedLine \n" +
-            s"Actual Result   \n: $actualLine \n" +
-            s"Query =" + sqlString + " Table Type : " + tableType)
-         */
-        // Commented due to Q37 failure by just the difference of 0.1 in actual and expected value
-      }
-      numLines += 1
-    }
-    if (actualLineSet.hasNext || expectedLineSet.hasNext) {
-      pw.println(s"\nFor ${queryNum} result count mismatch observed")
-      assert(assertion = false, s"\nFor $queryNum result count mismatch observed")
-    }
-    assert(numLines == numRows, s"\nFor $queryNum result count mismatch " +
-        s"observed: Expected=$numRows, Got=$numLines")
-    pw.flush()
-    // scalastyle:on println
-  }
-
   def createAndLoadReplicatedTables(snc: SnappyContext): Unit = {
 
     snc.sql(NWQueries.regions_table)
@@ -215,267 +64,141 @@ object NWTestUtil {
   /*
   Method for validating only number of rows with default data for northwind schema size data
   */
-  def validateQueries(snc: SnappyContext, tableType: String, pw: PrintWriter): Unit = {
-    for (q <- NWQueries.queries) {
-      q._1 match {
-        case "Q1" => SnappyTestUtils.assertQuery(snc, NWQueries.Q1, 8, "Q1", pw)
-        case "Q2" => SnappyTestUtils.assertQuery(snc, NWQueries.Q2, 91, "Q2", pw)
-        case "Q3" => SnappyTestUtils.assertQuery(snc, NWQueries.Q3, 830, "Q3", pw)
-        case "Q4" => SnappyTestUtils.assertQuery(snc, NWQueries.Q4, 9, "Q4", pw)
-        case "Q5" => SnappyTestUtils.assertQuery(snc, NWQueries.Q5, 9, "Q5", pw)
-        case "Q6" => SnappyTestUtils.assertQuery(snc, NWQueries.Q6, 9, "Q6", pw)
-        case "Q7" => SnappyTestUtils.assertQuery(snc, NWQueries.Q7, 9, "Q7", pw)
-        case "Q8" => SnappyTestUtils.assertQuery(snc, NWQueries.Q8, 6, "Q8", pw)
-        case "Q9" => SnappyTestUtils.assertQuery(snc, NWQueries.Q9, 3, "Q9", pw)
-        case "Q10" => SnappyTestUtils.assertQuery(snc, NWQueries.Q10, 2, "Q10", pw)
-        case "Q11" => SnappyTestUtils.assertQuery(snc, NWQueries.Q11, 4, "Q11", pw)
-        case "Q12" => SnappyTestUtils.assertQuery(snc, NWQueries.Q12, 2, "Q12", pw)
-        case "Q13" => SnappyTestUtils.assertQuery(snc, NWQueries.Q13, 2, "Q13", pw)
-        case "Q14" => SnappyTestUtils.assertQuery(snc, NWQueries.Q14, 69, "Q14", pw)
-        case "Q15" => SnappyTestUtils.assertQuery(snc, NWQueries.Q15, 5, "Q15", pw)
-        case "Q16" => SnappyTestUtils.assertQuery(snc, NWQueries.Q16, 8, "Q16", pw)
-        case "Q17" => SnappyTestUtils.assertQuery(snc, NWQueries.Q17, 3, "Q17", pw)
-        case "Q18" => SnappyTestUtils.assertQuery(snc, NWQueries.Q18, 9, "Q18", pw)
-        case "Q20" => SnappyTestUtils.assertQuery(snc, NWQueries.Q20, 1, "Q20", pw)
-        case "Q21" => SnappyTestUtils.assertQuery(snc, NWQueries.Q21, 1, "Q21", pw)
-        case "Q22" => SnappyTestUtils.assertQuery(snc, NWQueries.Q22, 1, "Q22", pw)
-        case "Q23" => SnappyTestUtils.assertQuery(snc, NWQueries.Q23, 1, "Q23", pw)
-        case "Q24" => SnappyTestUtils.assertQuery(snc, NWQueries.Q24, 4, "Q24", pw)
-        case "Q25" => SnappyTestUtils.assertJoin(snc, NWQueries.Q25, 1, "Q25", pw)
-        case "Q26" => SnappyTestUtils.assertJoin(snc, NWQueries.Q26, 86, "Q26", pw)
-        case "Q27" => SnappyTestUtils.assertJoin(snc, NWQueries.Q27, 9, "Q27", pw)
-        case "Q28" => SnappyTestUtils.assertJoin(snc, NWQueries.Q28, 12, "Q28", pw)
-        case "Q29" => SnappyTestUtils.assertJoin(snc, NWQueries.Q29, 8, "Q29", pw)
-        case "Q30" => SnappyTestUtils.assertJoin(snc, NWQueries.Q30, 8, "Q30", pw)
-        case "Q31" => SnappyTestUtils.assertJoin(snc, NWQueries.Q31, 830, "Q31", pw)
-        case "Q32" => SnappyTestUtils.assertJoin(snc, NWQueries.Q32, 8, "Q32", pw)
-        case "Q33" => SnappyTestUtils.assertJoin(snc, NWQueries.Q33, 37, "Q33", pw)
-        case "Q34" => SnappyTestUtils.assertJoin(snc, NWQueries.Q34, 5, "Q34", pw)
-        case "Q35" => SnappyTestUtils.assertJoin(snc, NWQueries.Q35, 3, "Q35", pw)
-        case "Q36" => SnappyTestUtils.assertJoin(snc, NWQueries.Q36, 290, "Q36", pw)
-        case "Q37" => SnappyTestUtils.assertJoin(snc, NWQueries.Q37, 77, "Q37", pw)
-        case "Q38" => SnappyTestUtils.assertJoin(snc, NWQueries.Q38, 2155, "Q38", pw)
-        case "Q39" => SnappyTestUtils.assertJoin(snc, NWQueries.Q39, 9, "Q39", pw)
-        case "Q40" => SnappyTestUtils.assertJoin(snc, NWQueries.Q40, 830, "Q40", pw)
-        case "Q41" => SnappyTestUtils.assertJoin(snc, NWQueries.Q41, 2155, "Q41", pw)
-        case "Q42" => SnappyTestUtils.assertJoin(snc, NWQueries.Q42, 22, "Q42", pw)
-        case "Q43" => SnappyTestUtils.assertJoin(snc, NWQueries.Q43, 830, "Q43", pw)
-        case "Q44" => SnappyTestUtils.assertJoin(snc, NWQueries.Q44, 830, "Q44", pw)  // LeftSemiJoinHash
-        case "Q45" => SnappyTestUtils.assertJoin(snc, NWQueries.Q45, 1788650, "Q45", pw)
-        case "Q46" => SnappyTestUtils.assertJoin(snc, NWQueries.Q46, 1788650, "Q46", pw)
-        case "Q47" => SnappyTestUtils.assertJoin(snc, NWQueries.Q47, 1788650, "Q47", pw)
-        case "Q48" => SnappyTestUtils.assertJoin(snc, NWQueries.Q48, 1788650, "Q48", pw)
-        case "Q49" => SnappyTestUtils.assertJoin(snc, NWQueries.Q49, 1788650, "Q49", pw)
-        case "Q50" => SnappyTestUtils.assertJoin(snc, NWQueries.Q50, 2155, "Q50", pw)
-        case "Q51" => SnappyTestUtils.assertJoin(snc, NWQueries.Q51, 2155, "Q51", pw)
-        case "Q52" => SnappyTestUtils.assertJoin(snc, NWQueries.Q52, 2155, "Q52", pw)
-        case "Q53" => SnappyTestUtils.assertJoin(snc, NWQueries.Q53, 2155, "Q53", pw)
-        case "Q54" => SnappyTestUtils.assertJoin(snc, NWQueries.Q54, 2155, "Q54", pw)
-        case "Q55" => SnappyTestUtils.assertJoin(snc, NWQueries.Q55, 21, "Q55", pw)
-        case "Q56" => SnappyTestUtils.assertJoin(snc, NWQueries.Q56, 8, "Q56", pw)
-        case "Q57" => SnappyTestUtils.assertJoin(snc, NWQueries.Q57, 120, "Q57", pw)
-        case "Q58" => SnappyTestUtils.assertJoin(snc, NWQueries.Q58, 1, "Q58", pw)
-        case "Q59" => SnappyTestUtils.assertJoin(snc, NWQueries.Q59, 1, "Q59", pw)
-        case "Q60" => SnappyTestUtils.assertJoin(snc, NWQueries.Q60, 947, "Q60", pw)
-        // scalastyle:off println
-        case _ => println(s"Did not execute query ${q._1}")
-        // scalastyle:on println
-      }
-    }
-  }
-
-  def validateQueriesFullResultSet(snc: SnappyContext, tableType: String, pw: PrintWriter,
-                                   sqlContext: SQLContext): String = {
-    SnappyTestUtils.tableType = tableType
+  def validateQueries(snc: SnappyContext, tableType: String, pw: PrintWriter, sqlContext:
+  SQLContext): String = {
     var failedQueries = ""
+    if (SnappyTestUtils.validateFullResultSet) {
+      NWTestUtil.createAndLoadSparkTables(sqlContext)
+    }
     for (q <- NWQueries.queries) {
       var queryExecuted = true;
       var hasValidationFailed = false;
       q._1 match {
-        case "Q1" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q1, "Q1",
-             pw, sqlContext)
-        case "Q2" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q2, "Q2",
-             pw, sqlContext)
-        case "Q3" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q3, "Q3",
-             pw, sqlContext)
-        case "Q4" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q4, "Q4",
-             pw, sqlContext)
-        case "Q5" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q5, "Q5",
-             pw, sqlContext)
-        case "Q6" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q6, "Q6",
-             pw, sqlContext)
-        case "Q7" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q7, "Q7",
-             pw, sqlContext)
-        case "Q8" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q8, "Q8",
-             pw, sqlContext)
-        case "Q9" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q9, "Q9",
-             pw, sqlContext)
-        case "Q10" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q10, "Q10",
-             pw, sqlContext)
-        case "Q11" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q11, "Q11",
-             pw, sqlContext)
-        case "Q12" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q12, "Q12",
-              pw, sqlContext)
-        case "Q13" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q13, "Q13",
-             pw, sqlContext)
-        case "Q14" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q14, "Q14",
-             pw, sqlContext)
-        case "Q15" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q15, "Q15",
-             pw, sqlContext)
-        case "Q16" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q16, "Q16",
-             pw, sqlContext)
-        case "Q17" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q17, "Q17",
-             pw, sqlContext)
-        case "Q18" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q18, "Q18",
-             pw, sqlContext)
-        case "Q19" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q19, "Q19",
-             pw, sqlContext)
-        case "Q20" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q20, "Q20",
-             pw, sqlContext)
-        case "Q21" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q21, "Q21",
-             pw, sqlContext)
-        case "Q22" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q22, "Q22",
-             pw, sqlContext)
-        case "Q23" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q23, "Q23",
-             pw, sqlContext)
-        case "Q24" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q24, "Q24",
-             pw, sqlContext)
-        case "Q25" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q25, "Q25",
-             pw, sqlContext)
-        case "Q26" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q26, "Q26",
-             pw, sqlContext)
-        case "Q27" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q27, "Q27",
-             pw, sqlContext)
-        case "Q28" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q28, "Q28",
-             pw, sqlContext)
-        case "Q29" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q29, "Q29",
-             pw, sqlContext)
-        case "Q30" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q30, "Q30",
-             pw, sqlContext)
-        case "Q31" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q31, "Q31",
-             pw, sqlContext)
-        case "Q32" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q32, "Q32",
-             pw, sqlContext)
-        case "Q33" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q33, "Q33",
-             pw, sqlContext)
-        case "Q34" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q34, "Q34",
-             pw, sqlContext)
-        case "Q35" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q35, "Q35",
-             pw, sqlContext)
-        case "Q36" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q36, "Q36",
-             pw, sqlContext)
-        case "Q37" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q37, "Q37",
-             pw, sqlContext)
-        case "Q38" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q38, "Q38",
-             pw, sqlContext)
-        case "Q39" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q39, "Q39",
-             pw, sqlContext)
-        case "Q40" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q40, "Q40",
-             pw, sqlContext)
-        case "Q41" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q41, "Q41",
-             pw, sqlContext)
-        case "Q42" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q42, "Q42",
-             pw, sqlContext)
-        case "Q43" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q43, "Q43",
-             pw, sqlContext)
-        case "Q44" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q44, "Q44",
-             pw, sqlContext) // LeftSemiJoinHash
-        case "Q45" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q45, "Q45",
-             pw, sqlContext)
-        case "Q46" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q46, "Q46",
-             pw, sqlContext)
-        case "Q47" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q47, "Q47",
-             pw, sqlContext)
-        case "Q48" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q48, "Q48",
-             pw, sqlContext)
-        case "Q49" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q49, "Q49",
-             pw, sqlContext)
-        case "Q50" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q50, "Q50",
-             pw, sqlContext)
-        case "Q51" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q51, "Q51",
-             pw, sqlContext)
-        case "Q52" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q52, "Q52",
-             pw, sqlContext)
-        case "Q53" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q53, "Q53",
-             pw, sqlContext)
-        case "Q54" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q54, "Q54",
-             pw, sqlContext)
-        case "Q55" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q55, "Q55",
-             pw, sqlContext)
-        case "Q56" =>
-          hasValidationFailed = SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q56, "Q56",
-             pw, sqlContext)
-        case "Q57" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q57, "Q57",
-             pw, sqlContext)
-        case "Q58" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q58, "Q58",
-             pw, sqlContext)
-        case "Q59" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q59, "Q59",
-              pw, sqlContext)
-        case "Q60" =>
-          hasValidationFailed = SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q60, "Q60",
-             pw, sqlContext)
+        case "Q1" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q1, 8,
+          "Q1", pw, sqlContext)
+        case "Q2" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q2, 91,
+          "Q2", pw, sqlContext)
+        case "Q3" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q3, 830,
+          "Q3", pw, sqlContext)
+        case "Q4" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q4, 9,
+          "Q4", pw, sqlContext)
+        case "Q5" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q5, 9,
+          "Q5", pw, sqlContext)
+        case "Q6" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q6, 9,
+          "Q6", pw, sqlContext)
+        case "Q7" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q7, 9,
+          "Q7", pw, sqlContext)
+        case "Q8" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q8, 6,
+          "Q8", pw, sqlContext)
+        case "Q9" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q9, 3,
+          "Q9", pw, sqlContext)
+        case "Q10" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q10, 2,
+          "Q10", pw, sqlContext)
+        case "Q11" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q11, 4,
+          "Q11", pw, sqlContext)
+        case "Q12" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q12, 2,
+          "Q12", pw, sqlContext)
+        case "Q13" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q13, 2,
+          "Q13", pw, sqlContext)
+        case "Q14" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q14, 69,
+          "Q14", pw, sqlContext)
+        case "Q15" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q15, 5,
+          "Q15", pw, sqlContext)
+        case "Q16" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q16, 8,
+          "Q16", pw, sqlContext)
+        case "Q17" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q17, 3,
+          "Q17", pw, sqlContext)
+        case "Q18" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q18, 9,
+          "Q18", pw, sqlContext)
+        case "Q20" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q20, 1,
+          "Q20", pw, sqlContext)
+        case "Q21" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q21, 1,
+          "Q21", pw, sqlContext)
+        case "Q22" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q22, 1,
+          "Q22", pw, sqlContext)
+        case "Q23" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q23, 1,
+          "Q23", pw, sqlContext)
+        case "Q24" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q24, 4,
+          "Q24", pw, sqlContext)
+        case "Q25" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q25, 1,
+          "Q25", pw, sqlContext)
+        case "Q26" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q26, 86,
+          "Q26", pw, sqlContext)
+        case "Q27" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q27, 9,
+          "Q27", pw, sqlContext)
+        case "Q28" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q28, 12,
+          "Q28", pw, sqlContext)
+        case "Q29" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q29, 8,
+          "Q29", pw, sqlContext)
+        case "Q30" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q30, 8,
+          "Q30", pw, sqlContext)
+        case "Q31" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q31, 830,
+          "Q31", pw, sqlContext)
+        case "Q32" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q32, 8,
+          "Q32", pw, sqlContext)
+        case "Q33" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q33, 37,
+          "Q33", pw, sqlContext)
+        case "Q34" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q34, 5,
+          "Q34", pw, sqlContext)
+        case "Q35" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q35, 3,
+          "Q35", pw, sqlContext)
+        case "Q36" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q36, 290,
+          "Q36", pw, sqlContext)
+        case "Q37" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q37, 77,
+          "Q37", pw, sqlContext)
+        case "Q38" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q38, 2155,
+          "Q38", pw, sqlContext)
+        case "Q39" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q39, 9,
+          "Q39", pw, sqlContext)
+        case "Q40" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q40, 830,
+          "Q40", pw, sqlContext)
+        case "Q41" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q41, 2155,
+          "Q41", pw, sqlContext)
+        case "Q42" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q42, 22,
+          "Q42", pw, sqlContext)
+        case "Q43" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q43, 830,
+          "Q43", pw, sqlContext)
+        // LeftSemiJoinHash
+        case "Q44" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q44, 830,
+          "Q44", pw, sqlContext)
+        case "Q45" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q45,
+          1788650, "Q45", pw, sqlContext)
+        case "Q46" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q46,
+          1788650, "Q46", pw, sqlContext)
+        case "Q47" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q47,
+          1788650, "Q47", pw, sqlContext)
+        case "Q48" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q48,
+          1788650, "Q48", pw, sqlContext)
+        case "Q49" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q49,
+          1788650, "Q49", pw, sqlContext)
+        case "Q50" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q50, 2155,
+          "Q50", pw, sqlContext)
+        case "Q51" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q51, 2155,
+          "Q51", pw, sqlContext)
+        case "Q52" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q52, 2155,
+          "Q52", pw, sqlContext)
+        case "Q53" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q53, 2155,
+          "Q53", pw, sqlContext)
+        case "Q54" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q54, 2155,
+          "Q54", pw, sqlContext)
+        case "Q55" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q55, 21,
+          "Q55", pw, sqlContext)
+        case "Q56" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56, 8,
+          "Q56", pw, sqlContext)
+        case "Q57" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q57, 120,
+          "Q57", pw, sqlContext)
+        case "Q58" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q58, 1,
+          "Q58", pw, sqlContext)
+        case "Q59" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q59, 1,
+          "Q59", pw, sqlContext)
+        case "Q60" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q60, 947,
+          "Q60", pw, sqlContext)
         // scalastyle:off println
-        case _ => println(s"Did not execute query ${q._1}")
+        case _ =>
+          pw.println(s"Query ${q._1} will not  be executed.")
+          queryExecuted = false
       }
-      if(queryExecuted) {
+      if (queryExecuted) {
         pw.println(s"Execution completed for query ${q._1}")
       }
       if (hasValidationFailed) {
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
index 22d7987379..874b588693 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
@@ -50,7 +50,7 @@ object ValidateNWQueriesApp {
       pw.println(s"Validate ${tableType} tables Queries Test started at : " + System
           .currentTimeMillis)
       pw.println(s"dataFilesLocation : ${dataFilesLocation}")
-      NWTestUtil.validateQueries(snc, tableType, pw)
+      NWTestUtil.validateQueries(snc, tableType, pw, sqlContext)
       pw.println(s"Validate ${tableType} tables Queries Test completed successfully at : " +
           System.currentTimeMillis)
     }
@@ -67,7 +67,7 @@ object ValidateNWQueriesApp {
         NWTestUtil.validateSelectiveQueriesFullResultSet(snc, tableType, pw, sqlContext)
       }
       else {
-        val failedQueries = NWTestUtil.validateQueriesFullResultSet(snc, tableType, pw, sqlContext)
+        val failedQueries = NWTestUtil.validateQueries(snc, tableType, pw, sqlContext)
         if (!failedQueries.isEmpty) {
           println(s"Validation failed for ${tableType} for queries ${failedQueries}.. See " +
               s"${getCurrentDirectory}/${outputFile}")
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
index d9eaf357bb..296967f785 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
@@ -50,7 +50,7 @@ class ValidateNWQueriesJob extends SnappySQLJob {
         // scalastyle:off println
         pw.println(s"Validate ${tableType} tables Queries Test started at : " + System
             .currentTimeMillis)
-        NWTestUtil.validateQueries(snc, tableType, pw)
+        NWTestUtil.validateQueries(snc, tableType, pw, sqlContext)
         pw.println(s"Validate ${tableType} tables Queries Test completed successfully at : " +
             System.currentTimeMillis)
       }
@@ -67,12 +67,14 @@ class ValidateNWQueriesJob extends SnappySQLJob {
           NWTestUtil.validateSelectiveQueriesFullResultSet(snc, tableType, pw, sqlContext)
         }
         else {
-          val failedQueries = NWTestUtil.validateQueriesFullResultSet(snc, tableType, pw, sqlContext)
+          val failedQueries = NWTestUtil.validateQueries(snc, tableType, pw, sqlContext)
           if(!failedQueries.isEmpty) {
-            println(s"Validation failed for ${tableType} for queries ${failedQueries}. See ${getCurrentDirectory}/${outputFile}")
+            println(s"Validation failed for ${tableType} for queries ${failedQueries}. " +
+                s"See ${getCurrentDirectory}/${outputFile}")
             pw.println(s"Validation failed for ${tableType} for queries ${failedQueries}. ")
             pw.close()
-            throw new TestException(s"Validation task failed for ${tableType}. See ${getCurrentDirectory}/${outputFile}")
+            throw new TestException(s"Validation task failed for ${tableType}. " +
+                s"See ${getCurrentDirectory}/${outputFile}")
           }
         }
         pw.println(s"validateQueriesFullResultSet ${tableType} tables Queries Test completed  " +

From 83e7732e26846b3b3a2f3304b9d436aacec585d1 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Wed, 7 Feb 2018 11:29:28 +0530
Subject: [PATCH 05/33] - Further refactoring changes to northwind and ct bts.

---
 .../java/io/snappydata/hydra/ct/local.ct.conf |  14 ++
 .../hydra/northwind/local.northWind.conf      |   8 +
 .../io/snappydata/hydra/SnappyTestUtils.scala | 165 +++++++++---------
 .../hydra/concurrency/ConcTestUtils.scala     | 118 +++++++------
 .../hydra/ct/ValidateCTQueriesApp.scala       |   2 +-
 .../hydra/northwind/NWTestJob.scala           |  40 +----
 .../hydra/northwind/NWTestSparkApp.scala      |   4 +
 .../hydra/northwind/NWTestUtil.scala          | 126 ++++++-------
 .../northwind/ValidateNWQueriesApp.scala      |   4 +
 9 files changed, 246 insertions(+), 235 deletions(-)
 create mode 100644 dtests/src/test/java/io/snappydata/hydra/ct/local.ct.conf

diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/local.ct.conf b/dtests/src/test/java/io/snappydata/hydra/ct/local.ct.conf
new file mode 100644
index 0000000000..0b4820f662
--- /dev/null
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/local.ct.conf
@@ -0,0 +1,14 @@
+hydra.HostPrms-hostNames =
+fcn "hydra.TestConfigFcns.pool(\"<host1> <host2> <host3>\", ${snappyStoreHosts})" ncf
+fcn "hydra.TestConfigFcns.pool(\"<host1> <host2>\", ${leadHosts})" ncf
+fcn "hydra.TestConfigFcns.pool(\"<host1> <host2> <host3>\", ${locatorHosts})" ncf
+fcn "hydra.TestConfigFcns.pool(\"<host1> <host2> <host3>\", ${workerHosts})" ncf
+;
+
+io.snappydata.hydra.cluster.SnappyPrms-serverLauncherProps = " -heap-size=6g -memory-size=6g -critical-heap-percentage=95 -J-XX:+UnlockDiagnosticVMOptions  -J-XX:ParGCCardsPerStrideChunk=32768 -member-timeout=60000 -J-Dp2p.socket_timeout=60000 ";
+io.snappydata.hydra.cluster.SnappyPrms-locatorLauncherProps = " -member-timeout=60000 -J-Dp2p.socket_timeout=60000 ";
+
+io.snappydata.hydra.cluster.SnappyPrms-executorMemory = 4g;
+io.snappydata.hydra.cluster.SnappyPrms-leadMemory = 4g;
+
+
diff --git a/dtests/src/test/java/io/snappydata/hydra/northwind/local.northWind.conf b/dtests/src/test/java/io/snappydata/hydra/northwind/local.northWind.conf
index 91dffdfae1..3feb1a63cd 100644
--- a/dtests/src/test/java/io/snappydata/hydra/northwind/local.northWind.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/northwind/local.northWind.conf
@@ -1,3 +1,4 @@
+
 hydra.HostPrms-hostNames =
 fcn "hydra.TestConfigFcns.pool(\"<host1> <host2> <host3>\", ${snappyStoreHosts})" ncf
 fcn "hydra.TestConfigFcns.pool(\"<host4> <host5>\", ${leadHosts})" ncf
@@ -5,3 +6,10 @@ fcn "hydra.TestConfigFcns.pool(\"<host4> <host4> <host5>\", ${locatorHosts})" nc
 fcn "hydra.TestConfigFcns.pool(\"<host1> <host2> <host3>\", ${workerHosts})" ncf
 ;
 
+
+io.snappydata.hydra.cluster.SnappyPrms-serverLauncherProps = " -heap-size=4g -memory-size=8g -J-XX:+UnlockDiagnosticVMOptions  -J-XX:ParGCCardsPerStrideChunk=32768 -member-timeout=60000 -J-Dp2p.socket_timeout=60000 ";
+io.snappydata.hydra.cluster.SnappyPrms-locatorLauncherProps = " -member-timeout=60000 -J-Dp2p.socket_timeout=60000 ";
+
+io.snappydata.hydra.cluster.SnappyPrms-executorMemory = 4g;
+io.snappydata.hydra.cluster.SnappyPrms-leadMemory = 4g;
+
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index 279770071d..b8e54d407e 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -30,60 +30,119 @@ object SnappyTestUtils {
   var numRowsValidation: Boolean = false;
   var tableType: String = null
 
+  /*
+  Executes the join query and validates only the full resultSet against the
+  expected results.
+  Returns true if the query validation has failed, false otherwise.
+  */
+  def assertJoin(snc: SnappyContext, sqlString: String, queryNum: String, pw: PrintWriter,
+      sqlContext: SQLContext): Boolean = {
+    var validationFailed = false
+    numRowsValidation = false
+    validationFailed = assertJoin(snc, sqlString, 0, queryNum, pw, sqlContext)
+    return validationFailed
+  }
+
   /*
   Executes the join query, matches the result with expected result, returns false if the query
   validation has failed.
   */
-  def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
-  PrintWriter, sqlContext: SQLContext): Boolean = {
+  def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
+      pw: PrintWriter, sqlContext: SQLContext): Boolean = {
     var validationFailed = false
     snc.sql("set spark.sql.crossJoin.enabled = true")
     validationFailed = assertQuery(snc, sqlString, numRows, queryNum, pw, sqlContext)
     return validationFailed
   }
 
+  /*
+   Executes the query and validates only the full resultSet against the expected results.
+   Returns true if the query validation has failed, false otherwise.
+ */
+  def assertQuery(snc: SnappyContext, sqlString: String, queryNum: String,
+      pw: PrintWriter, sqlContext: SQLContext): Boolean = {
+    numRowsValidation = false
+    assertQuery(snc, sqlString, 0, queryNum, pw, sqlContext)
+  }
   /*
    Executes the query, matches the result with expected result, returns false if the query
    validation has failed.
    */
-  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
-  PrintWriter, sqlContext: SQLContext): Boolean = {
+  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
+      pw: PrintWriter, sqlContext: SQLContext): Boolean = {
     var validationFailed = false
-    val df = snc.sql(sqlString)
-    val count = df.count
+    var snappyDF = snc.sql(sqlString)
+    val count = snappyDF.count
     // scalastyle:off println
     println(s"Query $queryNum")
-    df.explain(true)
-    if (SnappyTestUtils.numRowsValidation) {
-      pw.println(s"No. rows in resultset for query ${queryNum} is : ${count} for " +
-          s"${SnappyTestUtils.tableType} table")
-      if (df.count() != numRows) {
+    snappyDF.explain(true)
+    if (numRowsValidation) {
+      pw.println(s"No. of rows in resultset for query ${queryNum} is ${count} for " +
+          s"${tableType} table")
+      if (count != numRows) {
         pw.println(s"Result mismatch for query ${queryNum} : found ${count} rows but expected " +
             s" ${numRows} rows.")
         validationFailed = true
       }
       pw.flush()
     }
-    if (SnappyTestUtils.validateFullResultSet) {
-      validationFailed = assertQueryFullResultSet(snc, sqlString, queryNum,
-        pw, sqlContext)
+    var fullRSValidationFailed = false
+    if (validateFullResultSet) {
+
+      val snappyQueryFileName = s"Snappy_${queryNum}"
+      val snappyDest: String = getQueryResultDir("snappyQueryFiles") +
+          File.separator + snappyQueryFileName
+      // scalastyle:off println
+      pw.println(s"Snappy query results are at : ${snappyDest}")
+      val snappyFile: File = new java.io.File(snappyDest)
+
+      val sparkQueryFileName = s"Spark_${queryNum}"
+      val sparkDest: String = getQueryResultDir("sparkQueryFiles") + File.separator +
+          sparkQueryFileName
+      pw.println(s"Spark query results are at : ${sparkDest}")
+      val sparkFile: File = new java.io.File(sparkDest)
+      var sparkDF = sqlContext.sql(sqlString)
+
+      try {
+        if (!snappyFile.exists()) {
+          val snap_col1 = snappyDF.schema.fieldNames(0)
+          val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
+          snappyDF = snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
+          writeToFile(snappyDF, snappyDest, snc)
+          // writeResultSetToCsv(snappyDF, snappyFile)
+          pw.println(s"${queryNum} Result Collected in file ${snappyDest}")
+        }
+        if (!sparkFile.exists()) {
+          val col1 = sparkDF.schema.fieldNames(0)
+          val col = sparkDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
+          sparkDF = sparkDF.repartition(1).sortWithinPartitions(col1, col: _*)
+          writeToFile(sparkDF, sparkDest, snc)
+          // writeResultSetToCsv(sparkDF, sparkFile)
+          pw.println(s"${queryNum} Result Collected in file ${sparkDest}")
+        }
+        fullRSValidationFailed = compareFiles(snappyFile, sparkFile, pw, fullRSValidationFailed)
+      } catch {
+        case ex: Exception => {
+          fullRSValidationFailed = true
+          pw.println(s"Full resultSet validation failed for ${queryNum} with following " +
+              s"exception:\n")
+          ex.printStackTrace(pw)
+        }
+      }
+      pw.flush()
     }
 
     if (validationFailed) {
-      pw.println(s"Failed Query : " + sqlString + "\n Table Type : " + SnappyTestUtils.tableType
-          + "\n")
+      pw.println(s"\nNumRows validation failed for query ${queryNum} on ${tableType} table.")
+    }
+    if(fullRSValidationFailed){
+      pw.println(s"\nFull resultset validation failed for query ${queryNum} on ${tableType} table.")
+      validationFailed = true
     }
     pw.flush()
     return validationFailed
   }
 
-  def assertJoinFullResultSet(snc: SnappyContext, sqlString: String, queryNum: String,
-                              pw: PrintWriter, sqlContext: SQLContext): Boolean = {
-    snc.sql("set spark.sql.crossJoin.enabled = true")
-    sqlContext.sql("set spark.sql.crossJoin.enabled = true")
-    assertQueryFullResultSet(snc, sqlString, queryNum, pw, sqlContext)
-  }
-
   def dataTypeConverter(row: Row): Row = {
     val md = row.toSeq.map {
       // case d: Double => "%18.1f".format(d).trim().toDouble
@@ -147,58 +206,6 @@ object SnappyTestUtils {
     return queryResultDir.getAbsolutePath
   }
 
-  /*
-   Performs full resultSet validation from snappy for a select query against results in a
-   goldenFile.
- */
-  def assertQueryFullResultSet(snc: SnappyContext, sqlString: String, queryNum: String,
-                               pw: PrintWriter, sqlContext: SQLContext): Boolean = {
-    var hasValidationFailed = false
-
-    val snappyQueryFileName = s"Snappy_${queryNum}"
-    val snappyDest: String = SnappyTestUtils.getQueryResultDir("snappyQueryFiles") +
-        File.separator + snappyQueryFileName
-    // scalastyle:off println
-    pw.println(snappyDest)
-    val snappyFile: File = new java.io.File(snappyDest)
-    var snappyDF = snc.sql(sqlString)
-
-    val sparkQueryFileName = s"Spark_${queryNum}"
-    val sparkDest: String = SnappyTestUtils.getQueryResultDir("sparkQueryFiles") + File.separator +
-        sparkQueryFileName
-    pw.println(sparkDest)
-    val sparkFile: File = new java.io.File(sparkDest)
-    var sparkDF = sqlContext.sql(sqlString)
-
-    try {
-      if (!snappyFile.exists()) {
-        val snap_col1 = snappyDF.schema.fieldNames(0)
-        val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
-        snappyDF = snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
-        writeToFile(snappyDF, snappyDest, snc)
-        // writeResultSetToCsv(snappyDF, snappyFile)
-        pw.println(s"${queryNum} Result Collected in file ${snappyDest}")
-      }
-      if (!sparkFile.exists()) {
-        val col1 = sparkDF.schema.fieldNames(0)
-        val col = sparkDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
-        sparkDF = sparkDF.repartition(1).sortWithinPartitions(col1, col: _*)
-        writeToFile(sparkDF, sparkDest, snc)
-        // writeResultSetToCsv(sparkDF, sparkFile)
-        pw.println(s"${queryNum} Result Collected in file ${sparkDest}")
-      }
-      hasValidationFailed = compareFiles(snappyFile, sparkFile, pw, hasValidationFailed)
-    } catch {
-      case ex: Exception => {
-        hasValidationFailed = true
-        pw.println(s"Full resultSet Validation failed for ${queryNum} with following exception:\n")
-        ex.printStackTrace(pw)
-      }
-    }
-    pw.flush()
-    return hasValidationFailed
-  }
-
   def compareFiles(snappyFile: File, sparkFile: File, pw: PrintWriter, validationFailed: Boolean):
   Boolean = {
     var hasValidationFailed = validationFailed
@@ -261,7 +268,7 @@ object SnappyTestUtils {
     var hasValidationFailed = validationFailed
 
     val snappyQueryFileName = s"Snappy_${queryNum}"
-    val snappyDest: String = SnappyTestUtils.getQueryResultDir("snappyQueryFiles") +
+    val snappyDest: String = getQueryResultDir("snappyQueryFiles") +
         File.separator + snappyQueryFileName
     pw.println(snappyDest)
     val snappyFile: File = new java.io.File(snappyDest)
@@ -278,7 +285,7 @@ object SnappyTestUtils {
         val snap_col1 = snappyDF.schema.fieldNames(0)
         val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
         snappyDF = snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
-        SnappyTestUtils.writeToFile(snappyDF, snappyDest, snc)
+        writeToFile(snappyDF, snappyDest, snc)
         // writeResultSetToCsv(snappyDF, snappyFile)
         pw.println(s"${queryNum} Result Collected in file $snappyDest")
       }
@@ -293,13 +300,13 @@ object SnappyTestUtils {
         val col1 = goldenDF.schema.fieldNames(0)
         val col = goldenDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
         goldenDF = goldenDF.repartition(1).sortWithinPartitions(col1, col: _*)
-        SnappyTestUtils.writeToFile(goldenDF, sortedGoldenDest, snc)
+        writeToFile(goldenDF, sortedGoldenDest, snc)
         // writeResultSetToCsv(goldenDF, sortedGoldenFile)
         pw.println(s"${queryNum} Result Collected in file ${sortedGoldenDest}")
       } else {
         pw.println(s"zero results in query $queryNum.")
       }
-      hasValidationFailed = SnappyTestUtils.compareFiles(snappyFile, sortedGoldenFile, pw,
+      hasValidationFailed = compareFiles(snappyFile, sortedGoldenFile, pw,
         hasValidationFailed)
 
     } catch {
diff --git a/dtests/src/test/scala/io/snappydata/hydra/concurrency/ConcTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/concurrency/ConcTestUtils.scala
index e679555b3b..1750979b6b 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/concurrency/ConcTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/concurrency/ConcTestUtils.scala
@@ -25,17 +25,18 @@ import org.apache.spark.sql.{SQLContext, SnappyContext}
 object ConcTestUtils {
   def validateAnalyticalQueriesFullResultSet(snc: SnappyContext, tableType: String, pw:
   PrintWriter, sqlContext: SQLContext): Unit = {
+    SnappyTestUtils.validateFullResultSet = true;
     for (q <- NWQueries.queries) {
       q._1 match {
-        case "Q37" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q37, "Q37",
+        case "Q37" => SnappyTestUtils.assertJoin(snc, NWQueries.Q37, "Q37",
           pw, sqlContext)
-        case "Q55" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q55, "Q55",
+        case "Q55" => SnappyTestUtils.assertJoin(snc, NWQueries.Q55, "Q55",
           pw, sqlContext)
-        case "Q36" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q36, "Q36",
+        case "Q36" => SnappyTestUtils.assertJoin(snc, NWQueries.Q36, "Q36",
           pw, sqlContext)
-        case "Q56" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q56, "Q56",
+        case "Q56" => SnappyTestUtils.assertJoin(snc, NWQueries.Q56, "Q56",
           pw, sqlContext)
-        case "Q38" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q38, "Q38",
+        case "Q38" => SnappyTestUtils.assertJoin(snc, NWQueries.Q38, "Q38",
           pw, sqlContext)
         // scalastyle:off println
         case _ => println("OK")
@@ -46,114 +47,115 @@ object ConcTestUtils {
 
   def validatePointLookUPQueriesFullResultSet(snc: SnappyContext, tableType: String, pw:
   PrintWriter, sqlContext: SQLContext): Unit = {
+    SnappyTestUtils.validateFullResultSet = true;
     for (q <- NWPLQueries.queries) {
       q._1 match {
-        case "Q1" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q1, "Q1",
+        case "Q1" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q1, "Q1",
           pw, sqlContext)
-        case "Q2" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q2, "Q2",
+        case "Q2" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q2, "Q2",
           pw, sqlContext)
-        case "Q3" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q3, "Q3",
+        case "Q3" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q3, "Q3",
           pw, sqlContext)
-        case "Q4" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q4, "Q4",
+        case "Q4" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q4, "Q4",
           pw, sqlContext)
-        case "Q5" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q5, "Q5",
+        case "Q5" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q5, "Q5",
           pw, sqlContext)
-        case "Q6" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q6, "Q6",
+        case "Q6" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q6, "Q6",
           pw, sqlContext)
-        case "Q7" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q7, "Q7",
+        case "Q7" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q7, "Q7",
           pw, sqlContext)
-        case "Q8" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q8, "Q8",
+        case "Q8" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q8, "Q8",
           pw, sqlContext)
-        case "Q9" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q9, "Q9",
+        case "Q9" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q9, "Q9",
           pw, sqlContext)
-        case "Q10" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q5, "Q10",
+        case "Q10" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q5, "Q10",
           pw, sqlContext)
-        case "Q11" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q11, "Q11",
+        case "Q11" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q11, "Q11",
           pw, sqlContext)
-        case "Q12" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q12, "Q12",
+        case "Q12" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q12, "Q12",
           pw, sqlContext)
-        case "Q13" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q13, "Q13",
+        case "Q13" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q13, "Q13",
           pw, sqlContext)
-        case "Q14" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q14, "Q14",
+        case "Q14" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q14, "Q14",
           pw, sqlContext)
-        case "Q15" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q15, "Q15",
+        case "Q15" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q15, "Q15",
           pw, sqlContext)
-        case "Q16" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q16, "Q16",
+        case "Q16" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q16, "Q16",
           pw, sqlContext)
-        case "Q17" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q17, "Q17",
+        case "Q17" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q17, "Q17",
           pw, sqlContext)
-        case "Q18" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q18, "Q18",
+        case "Q18" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q18, "Q18",
           pw, sqlContext)
-        case "Q19" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q19, "Q19",
+        case "Q19" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q19, "Q19",
           pw, sqlContext)
-        case "Q20" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q20, "Q20",
+        case "Q20" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q20, "Q20",
           pw, sqlContext)
-        case "Q21" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q21, "Q21",
+        case "Q21" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q21, "Q21",
           pw, sqlContext)
-        case "Q22" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q22, "Q22",
+        case "Q22" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q22, "Q22",
           pw, sqlContext)
-        case "Q23" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q23, "Q23",
+        case "Q23" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q23, "Q23",
           pw, sqlContext)
-        case "Q24" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q24, "Q24",
+        case "Q24" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q24, "Q24",
           pw, sqlContext)
-        case "Q25" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q25, "Q25",
+        case "Q25" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q25, "Q25",
           pw, sqlContext)
-        case "Q26" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q26, "Q28",
+        case "Q26" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q26, "Q28",
           pw, sqlContext)
-        case "Q27" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q27, "Q27",
+        case "Q27" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q27, "Q27",
           pw, sqlContext)
-        case "Q28" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q28, "Q28",
+        case "Q28" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q28, "Q28",
           pw, sqlContext)
-        case "Q29" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q29, "Q29",
+        case "Q29" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q29, "Q29",
           pw, sqlContext)
-        case "Q30" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q30, "Q30",
+        case "Q30" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q30, "Q30",
           pw, sqlContext)
-        case "Q31" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q31, "Q31",
+        case "Q31" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q31, "Q31",
           pw, sqlContext)
-        case "Q32" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q32, "Q32",
+        case "Q32" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q32, "Q32",
           pw, sqlContext)
-        case "Q33" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q33, "Q33",
+        case "Q33" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q33, "Q33",
           pw, sqlContext)
-        case "Q34" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q34, "Q34",
+        case "Q34" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q34, "Q34",
           pw, sqlContext)
-        case "Q35" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q35, "Q35",
+        case "Q35" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q35, "Q35",
           pw, sqlContext)
-        case "Q36" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q36, "Q36",
+        case "Q36" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q36, "Q36",
           pw, sqlContext)
-        case "Q37" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q37, "Q37",
+        case "Q37" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q37, "Q37",
           pw, sqlContext)
-        case "Q38" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q38, "Q38",
+        case "Q38" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q38, "Q38",
           pw, sqlContext)
-        case "Q39" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q39, "Q39",
+        case "Q39" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q39, "Q39",
           pw, sqlContext)
-        case "Q40" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q40, "Q40",
+        case "Q40" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q40, "Q40",
           pw, sqlContext)
-        case "Q41" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q41, "Q41",
+        case "Q41" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q41, "Q41",
           pw, sqlContext)
-        case "Q42" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q42, "Q42",
+        case "Q42" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q42, "Q42",
           pw, sqlContext)
-        case "Q43" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q43, "Q43",
+        case "Q43" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q43, "Q43",
           pw, sqlContext)
-        case "Q44" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q44, "Q44",
+        case "Q44" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q44, "Q44",
           pw, sqlContext)
-        case "Q45" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q45, "Q45",
+        case "Q45" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q45, "Q45",
           pw, sqlContext)
-        case "Q46" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q46, "Q46",
+        case "Q46" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q46, "Q46",
           pw, sqlContext)
-        case "Q47" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q47, "Q47",
+        case "Q47" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q47, "Q47",
           pw, sqlContext)
-        case "Q48" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q48, "Q48",
+        case "Q48" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q48, "Q48",
           pw, sqlContext)
-        case "Q49" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q49, "Q49",
+        case "Q49" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q49, "Q49",
           pw, sqlContext)
-        case "Q50" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q50, "Q50",
+        case "Q50" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q50, "Q50",
           pw, sqlContext)
-        case "Q51" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q51, "Q51",
+        case "Q51" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q51, "Q51",
           pw, sqlContext)
-        /* case "Q52" => SnappyTestUtils.assertQueryFullResultSet(snc, NWPLQueries.Q52, "Q52",
+        /* case "Q52" => SnappyTestUtils.assertQuery(snc, NWPLQueries.Q52, "Q52",
           pw, sqlContext) */
         // scalastyle:off println
-        case _ => println("OK")
+        case _ => pw.println(s"Query ${q._1} has not been executed.")
         // scalastyle:on println
       }
     }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
index 0a28afc4a3..1becf42191 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
@@ -56,7 +56,7 @@ object ValidateCTQueriesApp {
     }
     val startTime = System.currentTimeMillis
     val failedQueries = CTTestUtil.executeQueries(snc, tableType, pw, fullResultSetValidation,
-      sqlContext,numRowsValidation)
+      sqlContext, numRowsValidation)
     val endTime = System.currentTimeMillis
     val totalTime = (endTime - startTime) / 1000
     pw.println(s"Total time for execution is :: ${totalTime} seconds.")
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestJob.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestJob.scala
index 45f6cf9e5a..1172dcd9a2 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestJob.scala
@@ -23,11 +23,11 @@ import com.typesafe.config.Config
 import org.apache.spark.sql._
 import scala.util.{Failure, Success, Try}
 
+import io.snappydata.hydra.SnappyTestUtils
+
 import org.apache.spark.SparkContext
 
 object NWTestJob extends SnappySQLJob {
-  var regions, categories, shippers, employees, customers, orders, order_details, products,
-  suppliers, territories, employee_territories: DataFrame = null
 
   def getCurrentDirectory: String = new java.io.File(".").getCanonicalPath
 
@@ -42,39 +42,8 @@ object NWTestJob extends SnappySQLJob {
       snc.setConf("dataFilesLocation", dataLocation)
       NWQueries.snc = snc
       NWQueries.dataFilesLocation = dataLocation
-      regions = snc.read.format("com.databricks.spark.csv")
-          .option("header", "true")
-          .load(s"$dataLocation/regions.csv")
-      categories = snc.read.format("com.databricks.spark.csv")
-          .option("header", "true")
-          .load(s"$dataLocation/categories.csv")
-      shippers = snc.read.format("com.databricks.spark.csv")
-          .option("header", "true")
-          .load(s"$dataLocation/shippers.csv")
-      employees = snc.read.format("com.databricks.spark.csv")
-          .option("header", "true")
-          .load(s"$dataLocation/employees.csv")
-      customers = snc.read.format("com.databricks.spark.csv")
-          .option("header", "true")
-          .load(s"$dataLocation/customers.csv")
-      orders = snc.read.format("com.databricks.spark.csv")
-          .option("header", "true")
-          .load(s"$dataLocation/orders.csv")
-      order_details = snc.read.format("com.databricks.spark.csv")
-          .option("header", "true")
-          .load(s"$dataLocation/order-details.csv")
-      products = snc.read.format("com.databricks.spark.csv")
-          .option("header", "true")
-          .load(s"$dataLocation/products.csv")
-      suppliers = snc.read.format("com.databricks.spark.csv")
-          .option("header", "true")
-          .load(s"$dataLocation/suppliers.csv")
-      territories = snc.read.format("com.databricks.spark.csv")
-          .option("header", "true")
-          .load(s"$dataLocation/territories.csv")
-      employee_territories = snc.read.format("com.databricks.spark.csv")
-          .option("header", "true")
-          .load(s"$dataLocation/employee-territories.csv")
+      SnappyTestUtils.numRowsValidation = true
+      SnappyTestUtils.validateFullResultSet = true
       NWTestUtil.dropTables(snc)
       // scalastyle:off println
       println("Test replicated row tables queries started")
@@ -84,6 +53,7 @@ object NWTestJob extends SnappySQLJob {
       NWTestUtil.dropTables(snc)
       println("Test partitioned row tables queries started")
       NWTestUtil.createAndLoadPartitionedTables(snc)
+
       NWTestUtil.validateQueries(snc, "Partitioned Row Table", pw, sqlContext)
       println("Test partitioned row tables queries completed successfully")
       NWTestUtil.dropTables(snc)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala
index 305dc8b0fd..4d77dde4b5 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala
@@ -18,6 +18,8 @@ package io.snappydata.hydra.northwind
 
 import java.io.{File, FileOutputStream, PrintWriter}
 
+import io.snappydata.hydra.SnappyTestUtils
+
 import org.apache.spark.sql.{SQLContext, SnappyContext}
 import org.apache.spark.{SparkConf, SparkContext}
 
@@ -40,6 +42,8 @@ object NWTestSparkApp {
     NWQueries.snc = snc
     NWQueries.dataFilesLocation = dataFilesLocation
     val pw = new PrintWriter(new FileOutputStream(new File("NWTestSparkApp.out"), true));
+    SnappyTestUtils.numRowsValidation = true
+    SnappyTestUtils.validateFullResultSet = true
     NWTestUtil.dropTables(snc)
     println("Test replicated row tables queries started")
     NWTestUtil.createAndLoadReplicatedTables(snc)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
index 73868621b3..3b2460f632 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
@@ -62,7 +62,8 @@ object NWTestUtil {
   }
 
   /*
-  Method for validating only number of rows with default data for northwind schema size data
+  Method for validating number of rows and fullresultset with default data for northwind schema
+  size data
   */
   def validateQueries(snc: SnappyContext, tableType: String, pw: PrintWriter, sqlContext:
   SQLContext): String = {
@@ -195,7 +196,7 @@ object NWTestUtil {
           "Q60", pw, sqlContext)
         // scalastyle:off println
         case _ =>
-          pw.println(s"Query ${q._1} will not  be executed.")
+          pw.println(s"Query ${q._1} has not been executed.")
           queryExecuted = false
       }
       if (queryExecuted) {
@@ -210,127 +211,128 @@ object NWTestUtil {
 
   def validateSelectiveQueriesFullResultSet(snc: SnappyContext, tableType: String, pw:
   PrintWriter, sqlContext: SQLContext): Unit = {
+    SnappyTestUtils.validateFullResultSet = true
     for (q <- NWQueries.queries) {
       q._1 match {
-        // case "Q1" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q1, "Q1",
+        // case "Q1" => SnappyTestUtils.assertQuery(snc, NWQueries.Q1, "Q1",
         //  pw, sqlContext)
-        // case "Q2" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q2, "Q2",
+        // case "Q2" => SnappyTestUtils.assertQuery(snc, NWQueries.Q2, "Q2",
         //  pw, sqlContext)
-        // case "Q3" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q3, "Q3",
+        // case "Q3" => SnappyTestUtils.assertQuery(snc, NWQueries.Q3, "Q3",
         //  pw, sqlContext)
-        // case "Q4" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q4, "Q4",
+        // case "Q4" => SnappyTestUtils.assertQuery(snc, NWQueries.Q4, "Q4",
         //  pw, sqlContext)
-        // case "Q5" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q5, "Q5",
+        // case "Q5" => SnappyTestUtils.assertQuery(snc, NWQueries.Q5, "Q5",
         //  pw, sqlContext)
-        case "Q6" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q6, "Q6",
+        case "Q6" => SnappyTestUtils.assertQuery(snc, NWQueries.Q6, "Q6",
            pw, sqlContext)
-        case "Q7" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q7, "Q7",
+        case "Q7" => SnappyTestUtils.assertQuery(snc, NWQueries.Q7, "Q7",
            pw, sqlContext)
-        // case "Q8" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q8, "Q8",
+        // case "Q8" => SnappyTestUtils.assertQuery(snc, NWQueries.Q8, "Q8",
         //  pw, sqlContext)
-        case "Q9" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q9, "Q9",
+        case "Q9" => SnappyTestUtils.assertQuery(snc, NWQueries.Q9, "Q9",
            pw, sqlContext)
-        // case "Q10" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q10, "Q10",
+        // case "Q10" => SnappyTestUtils.assertQuery(snc, NWQueries.Q10, "Q10",
         //  pw, sqlContext)
-        case "Q11" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q11, "Q11",
+        case "Q11" => SnappyTestUtils.assertQuery(snc, NWQueries.Q11, "Q11",
            pw, sqlContext)
-        case "Q12" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q12, "Q12",
+        case "Q12" => SnappyTestUtils.assertQuery(snc, NWQueries.Q12, "Q12",
            pw, sqlContext)
-        case "Q13" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q13, "Q13",
+        case "Q13" => SnappyTestUtils.assertQuery(snc, NWQueries.Q13, "Q13",
            pw, sqlContext)
-        case "Q14" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q14, "Q14",
+        case "Q14" => SnappyTestUtils.assertQuery(snc, NWQueries.Q14, "Q14",
            pw, sqlContext)
-        case "Q15" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q15, "Q15",
+        case "Q15" => SnappyTestUtils.assertQuery(snc, NWQueries.Q15, "Q15",
            pw, sqlContext)
-        case "Q16" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q16, "Q16",
+        case "Q16" => SnappyTestUtils.assertQuery(snc, NWQueries.Q16, "Q16",
            pw, sqlContext)
-        case "Q17" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q17, "Q17",
+        case "Q17" => SnappyTestUtils.assertQuery(snc, NWQueries.Q17, "Q17",
            pw, sqlContext)
-        case "Q18" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q18, "Q18",
+        case "Q18" => SnappyTestUtils.assertQuery(snc, NWQueries.Q18, "Q18",
            pw, sqlContext)
-        case "Q19" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q19, "Q19",
+        case "Q19" => SnappyTestUtils.assertQuery(snc, NWQueries.Q19, "Q19",
            pw, sqlContext)
-        case "Q20" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q20, "Q20",
+        case "Q20" => SnappyTestUtils.assertQuery(snc, NWQueries.Q20, "Q20",
            pw, sqlContext)
-        case "Q21" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q21, "Q21",
+        case "Q21" => SnappyTestUtils.assertQuery(snc, NWQueries.Q21, "Q21",
            pw, sqlContext)
-        case "Q22" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q22, "Q22",
+        case "Q22" => SnappyTestUtils.assertQuery(snc, NWQueries.Q22, "Q22",
            pw, sqlContext)
-        // case "Q23" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q23, "Q23",
+        // case "Q23" => SnappyTestUtils.assertQuery(snc, NWQueries.Q23, "Q23",
         //  pw, sqlContext)
-        case "Q24" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q24, "Q24",
+        case "Q24" => SnappyTestUtils.assertQuery(snc, NWQueries.Q24, "Q24",
            pw, sqlContext)
-        case "Q25" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q25, "Q25",
+        case "Q25" => SnappyTestUtils.assertJoin(snc, NWQueries.Q25, "Q25",
            pw, sqlContext)
-        case "Q26" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q26, "Q26",
+        case "Q26" => SnappyTestUtils.assertJoin(snc, NWQueries.Q26, "Q26",
            pw, sqlContext)
-        case "Q27" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q27, "Q27",
+        case "Q27" => SnappyTestUtils.assertJoin(snc, NWQueries.Q27, "Q27",
            pw, sqlContext)
-        case "Q28" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q28, "Q28",
+        case "Q28" => SnappyTestUtils.assertJoin(snc, NWQueries.Q28, "Q28",
            pw, sqlContext)
-        // case "Q29" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q29, "Q29",
+        // case "Q29" => SnappyTestUtils.assertJoin(snc, NWQueries.Q29, "Q29",
         //  pw, sqlContext)
-        case "Q30" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q30, "Q30",
+        case "Q30" => SnappyTestUtils.assertJoin(snc, NWQueries.Q30, "Q30",
            pw, sqlContext)
-        case "Q31" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q31, "Q31",
+        case "Q31" => SnappyTestUtils.assertJoin(snc, NWQueries.Q31, "Q31",
            pw, sqlContext)
-        case "Q32" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q32, "Q32",
+        case "Q32" => SnappyTestUtils.assertJoin(snc, NWQueries.Q32, "Q32",
            pw, sqlContext)
-        case "Q33" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q33, "Q33",
+        case "Q33" => SnappyTestUtils.assertJoin(snc, NWQueries.Q33, "Q33",
            pw, sqlContext)
-        case "Q34" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q34, "Q34",
+        case "Q34" => SnappyTestUtils.assertJoin(snc, NWQueries.Q34, "Q34",
            pw, sqlContext)
-/*        case "Q35" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q35, "Q35",
+/*        case "Q35" => SnappyTestUtils.assertJoin(snc, NWQueries.Q35, "Q35",
            pw, sqlContext) */
-        case "Q36" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q36, "Q36",
+        case "Q36" => SnappyTestUtils.assertJoin(snc, NWQueries.Q36, "Q36",
            pw, sqlContext)
-        case "Q37" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q37, "Q37",
+        case "Q37" => SnappyTestUtils.assertJoin(snc, NWQueries.Q37, "Q37",
            pw, sqlContext)
-        case "Q38" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q38, "Q38",
+        case "Q38" => SnappyTestUtils.assertJoin(snc, NWQueries.Q38, "Q38",
            pw, sqlContext)
-        case "Q39" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q39, "Q39",
+        case "Q39" => SnappyTestUtils.assertJoin(snc, NWQueries.Q39, "Q39",
            pw, sqlContext)
-        case "Q40" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q40, "Q40",
+        case "Q40" => SnappyTestUtils.assertJoin(snc, NWQueries.Q40, "Q40",
            pw, sqlContext)
-        case "Q41" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q41, "Q41",
+        case "Q41" => SnappyTestUtils.assertJoin(snc, NWQueries.Q41, "Q41",
            pw, sqlContext)
-        case "Q42" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q42, "Q42",
+        case "Q42" => SnappyTestUtils.assertJoin(snc, NWQueries.Q42, "Q42",
            pw, sqlContext)
-        case "Q43" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q43, "Q43",
+        case "Q43" => SnappyTestUtils.assertJoin(snc, NWQueries.Q43, "Q43",
            pw, sqlContext)
-        /* case "Q44" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q44, "Q44",
+        /* case "Q44" => SnappyTestUtils.assertJoin(snc, NWQueries.Q44, "Q44",
             pw, sqlContext) */ // LeftSemiJoinHash
-        /* case "Q45" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q45, "Q45",
+        /* case "Q45" => SnappyTestUtils.assertJoin(snc, NWQueries.Q45, "Q45",
            pw, sqlContext) */
-        /* case "Q46" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q46, "Q46",
+        /* case "Q46" => SnappyTestUtils.assertJoin(snc, NWQueries.Q46, "Q46",
          pw, sqlContext)
-        case "Q47" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q47, "Q47",
+        case "Q47" => SnappyTestUtils.assertJoin(snc, NWQueries.Q47, "Q47",
           pw, sqlContext)
-        case "Q48" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q48, "Q48",
+        case "Q48" => SnappyTestUtils.assertJoin(snc, NWQueries.Q48, "Q48",
           pw, sqlContext)
-        case "Q49" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q49, "Q49",
+        case "Q49" => SnappyTestUtils.assertJoin(snc, NWQueries.Q49, "Q49",
           pw, sqlContext)
-        case "Q50" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q50, "Q50",
+        case "Q50" => SnappyTestUtils.assertJoin(snc, NWQueries.Q50, "Q50",
           pw, sqlContext)
-        case "Q51" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q51, "Q51",
+        case "Q51" => SnappyTestUtils.assertJoin(snc, NWQueries.Q51, "Q51",
           pw, sqlContext)
-        case "Q52" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q52, "Q52",
+        case "Q52" => SnappyTestUtils.assertJoin(snc, NWQueries.Q52, "Q52",
           pw, sqlContext)
-        case "Q53" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q53, "Q53",
+        case "Q53" => SnappyTestUtils.assertJoin(snc, NWQueries.Q53, "Q53",
           pw, sqlContext) */
-        /* case "Q54" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q54, "Q54",
+        /* case "Q54" => SnappyTestUtils.assertJoin(snc, NWQueries.Q54, "Q54",
            pw, sqlContext) */
-        case "Q55" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q55, "Q55",
+        case "Q55" => SnappyTestUtils.assertJoin(snc, NWQueries.Q55, "Q55",
            pw, sqlContext)
-        case "Q56" => SnappyTestUtils.assertJoinFullResultSet(snc, NWQueries.Q56, "Q56",
+        case "Q56" => SnappyTestUtils.assertJoin(snc, NWQueries.Q56, "Q56",
            pw, sqlContext)
-/*        case "Q57" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q57, "Q57",
+/*        case "Q57" => SnappyTestUtils.assertQuery(snc, NWQueries.Q57, "Q57",
            pw, sqlContext) */
-        case "Q58" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q58, "Q58",
+        case "Q58" => SnappyTestUtils.assertQuery(snc, NWQueries.Q58, "Q58",
            pw, sqlContext)
-        case "Q59" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q59, "Q59",
+        case "Q59" => SnappyTestUtils.assertQuery(snc, NWQueries.Q59, "Q59",
            pw, sqlContext)
-        // case "Q60" => SnappyTestUtils.assertQueryFullResultSet(snc, NWQueries.Q60,"Q60",
+        // case "Q60" => SnappyTestUtils.assertQuery(snc, NWQueries.Q60,"Q60",
         //  pw, sqlContext)
         // scalastyle:off println
         case _ => println(s"Did not execute query ${q._1}")
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
index 874b588693..1751b99c9c 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
@@ -18,6 +18,8 @@ package io.snappydata.hydra.northwind
 
 import java.io.{File, FileOutputStream, PrintWriter}
 
+import io.snappydata.hydra.SnappyTestUtils
+
 import org.apache.spark.sql.{SQLContext, SnappyContext}
 import org.apache.spark.{SparkConf, SparkContext}
 
@@ -39,6 +41,8 @@ object ValidateNWQueriesApp {
     val tableType = args(1)
     val fullResultSetValidation: Boolean = args(2).toBoolean
     val numRowsValidation: Boolean = args(4).toBoolean
+    SnappyTestUtils.validateFullResultSet = fullResultSetValidation
+    SnappyTestUtils.numRowsValidation = numRowsValidation
     val isSmokeRun: Boolean = args(3).toBoolean
     def getCurrentDirectory = new java.io.File(".").getCanonicalPath
     val threadID = Thread.currentThread().getId

From 4a2ea6aabb27eb031312221b4ea0c289acf92f8a Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Wed, 7 Feb 2018 17:25:29 +0530
Subject: [PATCH 06/33] - Some changes in log messages. - Commented sparkApp
 for validation in NW eviction tests as per Swati's request.

---
 ...onedRowTablesWithEvictionOverflowTest.conf |  4 +-
 ...wPartitionedRowTablesWithEvictionTest.conf |  5 +-
 .../io/snappydata/hydra/SnappyTestUtils.scala | 19 ++++--
 .../io/snappydata/hydra/ct/CTTestUtil.scala   | 18 ++++--
 .../hydra/ct/ValidateCTQueriesApp.scala       | 20 +++---
 .../hydra/ct/ValidateCTQueriesJob.scala       | 29 ++++-----
 .../hydra/northwind/NWTestUtil.scala          | 39 ++++++++++--
 .../northwind/ValidateNWQueriesApp.scala      | 57 ++++++++---------
 .../northwind/ValidateNWQueriesJob.scala      | 61 ++++++++-----------
 9 files changed, 139 insertions(+), 113 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/northwind/nwPartitionedRowTablesWithEvictionOverflowTest.conf b/dtests/src/test/java/io/snappydata/hydra/northwind/nwPartitionedRowTablesWithEvictionOverflowTest.conf
index 5c99a9a5f1..c194ecda29 100644
--- a/dtests/src/test/java/io/snappydata/hydra/northwind/nwPartitionedRowTablesWithEvictionOverflowTest.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/northwind/nwPartitionedRowTablesWithEvictionOverflowTest.conf
@@ -19,15 +19,17 @@ INITTASK    taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = H
             io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer =
                         "dataFilesLocation=${dataFilesLocation},tableType=${tableType},fullResultSetValidation=${fullResultSetValidation},isSmokeRun=${isSmokeRun},numRowsValidation=${numRowsValidation}"
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
-            threadGroups = snappyStoreThreads
+            threadGroups = snappyThreads
             ;
 
+/*
 TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSparkJob
             io.snappydata.hydra.cluster.SnappyPrms-sparkJobClassNames = io.snappydata.hydra.northwind.ValidateNWQueriesApp
             io.snappydata.hydra.cluster.SnappyPrms-userAppArgs = "${dataFilesLocation} ${tableType} ${fullResultSetValidation} ${isSmokeRun} ${numRowsValidation}"
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
             threadGroups = leadThreads
             ;
+*/
 
 TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSQLScripts
             io.snappydata.hydra.cluster.SnappyPrms-sqlScriptNames = nw_queries.sql
diff --git a/dtests/src/test/java/io/snappydata/hydra/northwind/nwPartitionedRowTablesWithEvictionTest.conf b/dtests/src/test/java/io/snappydata/hydra/northwind/nwPartitionedRowTablesWithEvictionTest.conf
index d63c56bd0d..746e161804 100644
--- a/dtests/src/test/java/io/snappydata/hydra/northwind/nwPartitionedRowTablesWithEvictionTest.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/northwind/nwPartitionedRowTablesWithEvictionTest.conf
@@ -19,15 +19,16 @@ INITTASK    taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = H
             io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer =
                         "dataFilesLocation=${dataFilesLocation},tableType=${tableType},fullResultSetValidation=${fullResultSetValidation},isSmokeRun=${isSmokeRun},numRowsValidation=${numRowsValidation}"
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
-            threadGroups = snappyStoreThreads
+            threadGroups = snappyThreads
             ;
-
+/*
 TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSparkJob
             io.snappydata.hydra.cluster.SnappyPrms-sparkJobClassNames = io.snappydata.hydra.northwind.ValidateNWQueriesApp
             io.snappydata.hydra.cluster.SnappyPrms-userAppArgs = "${dataFilesLocation} ${tableType} ${fullResultSetValidation} ${isSmokeRun} ${numRowsValidation}"
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
             threadGroups = leadThreads
             ;
+*/
 
 TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSQLScripts
             io.snappydata.hydra.cluster.SnappyPrms-sqlScriptNames = nw_queries.sql
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index b8e54d407e..f206320262 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -51,6 +51,9 @@ object SnappyTestUtils {
       pw: PrintWriter, sqlContext: SQLContext): Boolean = {
     var validationFailed = false
     snc.sql("set spark.sql.crossJoin.enabled = true")
+    if (validateFullResultSet) {
+      sqlContext.sql("set spark.sql.crossJoin.enabled = true")
+    }
     validationFailed = assertQuery(snc, sqlString, numRows, queryNum, pw, sqlContext)
     return validationFailed
   }
@@ -77,8 +80,7 @@ object SnappyTestUtils {
     println(s"Query $queryNum")
     snappyDF.explain(true)
     if (numRowsValidation) {
-      pw.println(s"No. of rows in resultset for query ${queryNum} is ${count} for " +
-          s"${tableType} table")
+      pw.println(s"Query ${queryNum} returned ${count} rows for ${tableType} table")
       if (count != numRows) {
         pw.println(s"Result mismatch for query ${queryNum} : found ${count} rows but expected " +
             s" ${numRows} rows.")
@@ -93,13 +95,13 @@ object SnappyTestUtils {
       val snappyDest: String = getQueryResultDir("snappyQueryFiles") +
           File.separator + snappyQueryFileName
       // scalastyle:off println
-      pw.println(s"Snappy query results are at : ${snappyDest}")
+      // pw.println(s"Snappy query results are at : ${snappyDest}")
       val snappyFile: File = new java.io.File(snappyDest)
 
       val sparkQueryFileName = s"Spark_${queryNum}"
       val sparkDest: String = getQueryResultDir("sparkQueryFiles") + File.separator +
           sparkQueryFileName
-      pw.println(s"Snappy query results are at : ${sparkDest}")
+      // pw.println(s"Spark query results are at : ${sparkDest}")
       val sparkFile: File = new java.io.File(sparkDest)
       var sparkDF = sqlContext.sql(sqlString)
 
@@ -206,6 +208,14 @@ object SnappyTestUtils {
     return queryResultDir.getAbsolutePath
   }
 
+  /* In case of round-off, there is a difference of .1 in snappy and spark. We can ignore such
+  differences
+   */
+  def isIgnorable(actualLine: String, expectedLine: String): Boolean = {
+    var canBeIgnored = false
+    return canBeIgnored
+  }
+
   def compareFiles(snappyFile: File, sparkFile: File, pw: PrintWriter, validationFailed: Boolean):
   Boolean = {
     var hasValidationFailed = validationFailed
@@ -218,6 +228,7 @@ object SnappyTestUtils {
       val expectedLine = expectedLineSet.next()
       val actualLine = actualLineSet.next()
       if (!actualLine.equals(expectedLine)) {
+        isIgnorable(actualLine, expectedLine)
         hasValidationFailed = true
         pw.println(s"Expected Result : $expectedLine")
         pw.println(s"Actual Result   : $actualLine")
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/CTTestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/CTTestUtil.scala
index 310de0795a..68b734bd2e 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/CTTestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/CTTestUtil.scala
@@ -158,17 +158,23 @@ object CTTestUtil {
   }
 
   /*
-  Performs validation for tables with the queries. Returns failed queries in a string.
+  Method for validating queries results for ct schema for 1.5GB data.
+  For any other data size, this method can be used for comparing full resultsets by setting
+  numRowsValidation to false.
+  Returns failed queries in a string.
    */
-  def executeQueries(snc: SnappyContext, tblType: String, pw: PrintWriter,
-      fullResultSetValidation: Boolean, sqlContext: SQLContext, numRowsValidation: Boolean): String
-  = {
-    SnappyTestUtils.validateFullResultSet = fullResultSetValidation
+  def executeQueries(snc: SnappyContext, tblType: String, pw: PrintWriter, sqlContext:
+  SQLContext): String = {
     SnappyTestUtils.tableType = tblType
-    SnappyTestUtils.numRowsValidation = numRowsValidation
     var failedQueries = ""
     if (SnappyTestUtils.validateFullResultSet) {
+      // scalastyle:off println
+      pw.println(s"createAndLoadSparkTables started ...")
+      val startTime = System.currentTimeMillis
       CTTestUtil.createAndLoadSparkTables(sqlContext)
+      val finishTime = System.currentTimeMillis()
+      pw.println(s"createAndLoadSparkTables completed successfully in : " + ((finishTime -
+          startTime)/1000) + " seconds")
     }
 
     for (q <- CTQueries.queries) {
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
index 1becf42191..0076cdcbc1 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
@@ -19,8 +19,10 @@ package io.snappydata.hydra.ct
 
 import java.io.{File, FileOutputStream, PrintWriter}
 
+import io.snappydata.hydra.SnappyTestUtils
+
 import org.apache.spark.sql.{SQLContext, SnappyContext}
-import org.apache.spark.{SparkContext, SparkConf}
+import org.apache.spark.{SparkConf, SparkContext}
 
 object ValidateCTQueriesApp {
 
@@ -47,23 +49,19 @@ object ValidateCTQueriesApp {
     val tableType = args(1)
     val fullResultSetValidation: Boolean = args(2).toBoolean
     val numRowsValidation: Boolean = args(3).toBoolean
+    SnappyTestUtils.numRowsValidation = numRowsValidation
+    SnappyTestUtils.validateFullResultSet = fullResultSetValidation
     pw.println(s"Validation for queries with ${tableType} tables started")
-    if (fullResultSetValidation) {
-      pw.println(s"Test will perform fullResultSetValidation")
-    }
-    else {
-      pw.println(s"Test will not perform fullResultSetValidation")
-    }
+
     val startTime = System.currentTimeMillis
-    val failedQueries = CTTestUtil.executeQueries(snc, tableType, pw, fullResultSetValidation,
-      sqlContext, numRowsValidation)
+    val failedQueries = CTTestUtil.executeQueries(snc, tableType, pw, sqlContext)
     val endTime = System.currentTimeMillis
     val totalTime = (endTime - startTime) / 1000
     pw.println(s"Total time for execution is :: ${totalTime} seconds.")
     if (!failedQueries.isEmpty) {
-      println(s"Validation failed for ${tableType} for queries ${failedQueries}.. See " +
+      println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. See " +
           s"${getCurrentDirectory}/${outputFile}")
-      pw.println(s"Validation failed for ${tableType} for queries ${failedQueries}. ")
+      pw.println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. ")
       pw.close()
       throw new Exception(s"Validation task failed for ${tableType}. See " +
           s"${getCurrentDirectory}/${outputFile}")
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
index b4b408bb4b..2646b0355b 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
@@ -18,13 +18,15 @@
 package io.snappydata.hydra.ct
 
 import java.io.{File, FileOutputStream, PrintWriter}
+
 import scala.util.{Failure, Success, Try}
 
 import com.typesafe.config.Config
+import io.snappydata.hydra.SnappyTestUtils
 import util.TestException
 
 import org.apache.spark.SparkContext
-import org.apache.spark.sql.{SnappySession, SQLContext, SnappyJobValid, SnappyJobValidation, SnappySQLJob}
+import org.apache.spark.sql.{SQLContext, SnappyJobValid, SnappyJobValidation, SnappySQLJob, SnappySession}
 
 class ValidateCTQueriesJob extends SnappySQLJob {
 
@@ -46,34 +48,27 @@ class ValidateCTQueriesJob extends SnappySQLJob {
       pw.println(s"Validation for $tableType tables started in snappy Job")
       val numRowsValidation: Boolean = jobConfig.getBoolean("numRowsValidation")
       val fullResultSetValidation: Boolean = jobConfig.getBoolean("fullResultSetValidation")
+      SnappyTestUtils.validateFullResultSet = fullResultSetValidation
+      SnappyTestUtils.numRowsValidation = numRowsValidation
       val sc = SparkContext.getOrCreate()
       val sqlContext = SQLContext.getOrCreate(sc)
-      if(numRowsValidation){
-
-      }
-      if (fullResultSetValidation) {
-        pw.println(s"Test will perform fullResultSetValidation")
-      }
-      else {
-        pw.println(s"Test will not perform fullResultSetValidation")
-      }
       val startTime = System.currentTimeMillis
-      val failedQueries = CTTestUtil.executeQueries(snc, tableType, pw, fullResultSetValidation,
-        sqlContext, numRowsValidation)
+      val failedQueries = CTTestUtil.executeQueries(snc, tableType, pw, sqlContext)
       val endTime = System.currentTimeMillis
       val totalTime = (endTime - startTime) / 1000
-      pw.println(s"Total time for execution is :: ${totalTime} seconds.")
       if(!failedQueries.isEmpty) {
-        println(s"Validation failed for ${tableType} for queries ${failedQueries}. " +
+        println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. " +
             s"See ${getCurrentDirectory}/${outputFile}")
-        pw.println(s"Validation failed for ${tableType} for queries ${failedQueries}. ")
+        pw.println(s"Total execution took ${totalTime} seconds.")
+        pw.println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. ")
         pw.close()
         throw new TestException(s"Validation task failed for ${tableType}. " +
             s"See ${getCurrentDirectory}/${outputFile}")
       }
-      println(s"Validation for $tableType tables completed. " +
+      println(s"Validation for $tableType tables completed sucessfully. " +
           s"See ${getCurrentDirectory}/${outputFile}")
-      pw.println(s"Validation for $tableType tables completed.")
+      pw.println(s"ValidateQueries for ${tableType} tables completed successfully in " +
+          totalTime + " seconds ")
       pw.close()
     } match {
       case Success(v) => pw.close()
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
index 3b2460f632..f246c347ae 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
@@ -62,14 +62,22 @@ object NWTestUtil {
   }
 
   /*
-  Method for validating number of rows and fullresultset with default data for northwind schema
-  size data
+  Method for validating queries results for northwind schema with default size data.
+  For larger data size, this method can be used for comparing full resultsets by setting
+  numRowsValidation to false.
+    Returns failed queries in a string.
   */
   def validateQueries(snc: SnappyContext, tableType: String, pw: PrintWriter, sqlContext:
   SQLContext): String = {
     var failedQueries = ""
     if (SnappyTestUtils.validateFullResultSet) {
+      // scalastyle:off println
+      pw.println(s"createAndLoadSparkTables started ...")
+      val startTime = System.currentTimeMillis
       NWTestUtil.createAndLoadSparkTables(sqlContext)
+      val finishTime = System.currentTimeMillis()
+      pw.println(s"createAndLoadSparkTables completed successfully in : " + ((finishTime -
+          startTime)/1000) + " seconds")
     }
     for (q <- NWQueries.queries) {
       var queryExecuted = true;
@@ -196,7 +204,7 @@ object NWTestUtil {
           "Q60", pw, sqlContext)
         // scalastyle:off println
         case _ =>
-          pw.println(s"Query ${q._1} has not been executed.")
+          pw.println(s"Did not execute ${q._1}.")
           queryExecuted = false
       }
       if (queryExecuted) {
@@ -210,9 +218,22 @@ object NWTestUtil {
   }
 
   def validateSelectiveQueriesFullResultSet(snc: SnappyContext, tableType: String, pw:
-  PrintWriter, sqlContext: SQLContext): Unit = {
+  PrintWriter, sqlContext: SQLContext): String = {
+    SnappyTestUtils.numRowsValidation = false
     SnappyTestUtils.validateFullResultSet = true
+    var failedQueries = ""
+    if (SnappyTestUtils.validateFullResultSet) {
+      // scalastyle:off println
+      pw.println(s"createAndLoadSparkTables started ...")
+      val startTime = System.currentTimeMillis
+      NWTestUtil.createAndLoadSparkTables(sqlContext)
+      val finishTime = System.currentTimeMillis()
+      println(s"createAndLoadSparkTables completed successfully in : " + (finishTime - startTime) +
+          " seconds")
+    }
     for (q <- NWQueries.queries) {
+      var queryExecuted = true;
+      var hasValidationFailed = false;
       q._1 match {
         // case "Q1" => SnappyTestUtils.assertQuery(snc, NWQueries.Q1, "Q1",
         //  pw, sqlContext)
@@ -337,8 +358,17 @@ object NWTestUtil {
         // scalastyle:off println
         case _ => println(s"Did not execute query ${q._1}")
         // scalastyle:on println
+          queryExecuted = false
+      }
+      if (queryExecuted) {
+        // scalastyle:off println
+        pw.println(s"Execution completed for query ${q._1}")
+      }
+      if (hasValidationFailed) {
+        failedQueries = SnappyTestUtils.addToFailedQueryList(failedQueries, q._1)
       }
     }
+    return failedQueries;
   }
 
   def createAndLoadPartitionedTables(snc: SnappyContext,
@@ -600,5 +630,4 @@ object NWTestUtil {
     println("territories table dropped successfully.");
     // scalastyle:on println
   }
-
 }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
index 1751b99c9c..1b67a1306e 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
@@ -43,47 +43,38 @@ object ValidateNWQueriesApp {
     val numRowsValidation: Boolean = args(4).toBoolean
     SnappyTestUtils.validateFullResultSet = fullResultSetValidation
     SnappyTestUtils.numRowsValidation = numRowsValidation
+    SnappyTestUtils.tableType = tableType
     val isSmokeRun: Boolean = args(3).toBoolean
+
     def getCurrentDirectory = new java.io.File(".").getCanonicalPath
+
     val threadID = Thread.currentThread().getId
     val outputFile = "ValidateNWQueriesApp_thread_" + threadID + "_" + System.currentTimeMillis +
         ".out"
     val pw = new PrintWriter(new FileOutputStream(new File(outputFile), true));
-    if (numRowsValidation) {
-      // scalastyle:off println
-      pw.println(s"Validate ${tableType} tables Queries Test started at : " + System
-          .currentTimeMillis)
-      pw.println(s"dataFilesLocation : ${dataFilesLocation}")
-      NWTestUtil.validateQueries(snc, tableType, pw, sqlContext)
-      pw.println(s"Validate ${tableType} tables Queries Test completed successfully at : " +
-          System.currentTimeMillis)
+    var failedQueries = ""
+    // scalastyle:off println
+    val startTime = System.currentTimeMillis()
+    pw.println(s"ValidateQueries for ${tableType} tables started ..")
+    if (isSmokeRun) {
+      NWTestUtil.validateSelectiveQueriesFullResultSet(snc, tableType, pw, sqlContext)
+    }
+    else {
+      failedQueries = NWTestUtil.validateQueries(snc, tableType, pw, sqlContext)
     }
-    if (fullResultSetValidation) {
-      pw.println(s"createAndLoadSparkTables Test started at : " + System.currentTimeMillis)
-      NWTestUtil.createAndLoadSparkTables(sqlContext)
-      println(s"createAndLoadSparkTables Test completed successfully at : " + System
-          .currentTimeMillis)
-      pw.println(s"createAndLoadSparkTables Test completed successfully at : " + System
-          .currentTimeMillis)
-      pw.println(s"ValidateQueriesFullResultSet for ${tableType} tables Queries Test started at :" +
-          s" " + System.currentTimeMillis)
-      if (isSmokeRun) {
-        NWTestUtil.validateSelectiveQueriesFullResultSet(snc, tableType, pw, sqlContext)
-      }
-      else {
-        val failedQueries = NWTestUtil.validateQueries(snc, tableType, pw, sqlContext)
-        if (!failedQueries.isEmpty) {
-          println(s"Validation failed for ${tableType} for queries ${failedQueries}.. See " +
-              s"${getCurrentDirectory}/${outputFile}")
-          pw.println(s"Validation failed for ${tableType} for queries ${failedQueries}. ")
-          pw.close()
-          throw new Exception(s"Validation task failed for ${tableType}. See " +
-              s"${getCurrentDirectory}/${outputFile}")
-        }
-      }
-      pw.println(s"validateQueriesFullResultSet ${tableType} tables Queries Test completed  " +
-          s"successfully at : " + System.currentTimeMillis)
+    val finishTime = System.currentTimeMillis()
+    val totalTime = (finishTime -startTime)/1000
+    if (!failedQueries.isEmpty) {
+      println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. See " +
+          s"${getCurrentDirectory}/${outputFile}")
+      pw.println(s"Total execution took ${totalTime} seconds.")
+      pw.println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. ")
+      pw.close()
+      throw new Exception(s"Validation task failed for ${tableType} tables. See " +
+          s"${getCurrentDirectory}/${outputFile}")
     }
+    pw.println(s"ValidateQueries for ${tableType} tables completed successfully in ${totalTime} " +
+        s"seconds ")
     pw.close()
   }
 }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
index 296967f785..d59d40e43a 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
@@ -20,10 +20,10 @@ import java.io.{File, FileOutputStream, PrintWriter}
 
 import util.TestException
 import com.typesafe.config.Config
-import io.snappydata.hydra.northwind
+import io.snappydata.hydra.{SnappyTestUtils, northwind}
+
 import org.apache.spark.SparkContext
 import org.apache.spark.sql._
-
 import scala.util.{Failure, Success, Try}
 
 class ValidateNWQueriesJob extends SnappySQLJob {
@@ -38,48 +38,41 @@ class ValidateNWQueriesJob extends SnappySQLJob {
     val isSmokeRun: Boolean = jobConfig.getString("isSmokeRun").toBoolean
     val fullResultSetValidation: Boolean = jobConfig.getString("fullResultSetValidation").toBoolean
     val numRowsValidation: Boolean = jobConfig.getString("numRowsValidation").toBoolean
+    SnappyTestUtils.validateFullResultSet = fullResultSetValidation
+    SnappyTestUtils.numRowsValidation = numRowsValidation
+    SnappyTestUtils.tableType = tableType
     val sc = SparkContext.getOrCreate()
     val sqlContext = SQLContext.getOrCreate(sc)
     Try {
+      var failedQueries: String = "";
       snc.sql("set spark.sql.shuffle.partitions=23")
       val dataFilesLocation = jobConfig.getString("dataFilesLocation")
       snc.setConf("dataFilesLocation", dataFilesLocation)
       northwind.NWQueries.snc = snc
       NWQueries.dataFilesLocation = dataFilesLocation
-      if (numRowsValidation) {
-        // scalastyle:off println
-        pw.println(s"Validate ${tableType} tables Queries Test started at : " + System
-            .currentTimeMillis)
-        NWTestUtil.validateQueries(snc, tableType, pw, sqlContext)
-        pw.println(s"Validate ${tableType} tables Queries Test completed successfully at : " +
-            System.currentTimeMillis)
+      // scalastyle:off println
+      val startTime = System.currentTimeMillis()
+      pw.println(s"ValidateQueries for ${tableType} tables started ..")
+      if (isSmokeRun) {
+        failedQueries = NWTestUtil.validateSelectiveQueriesFullResultSet(snc, tableType, pw,
+          sqlContext)
+      }
+      else {
+        failedQueries = NWTestUtil.validateQueries(snc, tableType, pw, sqlContext)
       }
-      if (fullResultSetValidation) {
-        pw.println(s"createAndLoadSparkTables Test started at : " + System.currentTimeMillis)
-        NWTestUtil.createAndLoadSparkTables(sqlContext)
-        println(s"createAndLoadSparkTables Test completed successfully at : " + System
-            .currentTimeMillis)
-        pw.println(s"createAndLoadSparkTables Test completed successfully at : " + System
-            .currentTimeMillis)
-        pw.println(s"ValidateQueriesFullResultSet for ${tableType} tables Queries Test started at" +
-            s" :  " + System.currentTimeMillis)
-        if (isSmokeRun) {
-          NWTestUtil.validateSelectiveQueriesFullResultSet(snc, tableType, pw, sqlContext)
-        }
-        else {
-          val failedQueries = NWTestUtil.validateQueries(snc, tableType, pw, sqlContext)
-          if(!failedQueries.isEmpty) {
-            println(s"Validation failed for ${tableType} for queries ${failedQueries}. " +
-                s"See ${getCurrentDirectory}/${outputFile}")
-            pw.println(s"Validation failed for ${tableType} for queries ${failedQueries}. ")
-            pw.close()
-            throw new TestException(s"Validation task failed for ${tableType}. " +
-                s"See ${getCurrentDirectory}/${outputFile}")
-          }
-        }
-        pw.println(s"validateQueriesFullResultSet ${tableType} tables Queries Test completed  " +
-            s"successfully at : " + System.currentTimeMillis)
+      val finishTime = System.currentTimeMillis()
+      val totalTime = (finishTime -startTime)/1000
+      if (!failedQueries.isEmpty) {
+        println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. " +
+            s"See ${getCurrentDirectory}/${outputFile}")
+        pw.println(s"Total execution took ${totalTime} seconds.")
+        pw.println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. ")
+        pw.close()
+        throw new TestException(s"Validation task failed for ${tableType}. " +
+            s"See ${getCurrentDirectory}/${outputFile}")
       }
+      pw.println(s"ValidateQueries for ${tableType} tables completed successfully in " +
+          totalTime + " seconds ")
       pw.close()
     } match {
       case Success(v) => pw.close()

From 2fbf22c65f7600c5e20ac4466560883c6e96da63 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Fri, 9 Feb 2018 14:56:25 +0530
Subject: [PATCH 07/33] - Fixing minor test issue.

---
 .../java/io/snappydata/hydra/testDMLOps/SnappyDMLOpsUtil.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/testDMLOps/SnappyDMLOpsUtil.java b/dtests/src/test/java/io/snappydata/hydra/testDMLOps/SnappyDMLOpsUtil.java
index 9ffe7eff3a..6ce69740f3 100644
--- a/dtests/src/test/java/io/snappydata/hydra/testDMLOps/SnappyDMLOpsUtil.java
+++ b/dtests/src/test/java/io/snappydata/hydra/testDMLOps/SnappyDMLOpsUtil.java
@@ -669,7 +669,7 @@ public void performUpdate() {
       if (hasDerbyServer) {
         dConn = derbyTestUtils.getDerbyConnection();
         if(stmt.toUpperCase().contains("SELECT"))
-          getAndExecuteSelect(conn,stmt,true);
+          getAndExecuteSelect(dConn,stmt,true);
         Log.getLogWriter().info("Executing " + stmt + " on derby.");
         int derbyRows = dConn.createStatement().executeUpdate(stmt);
         Log.getLogWriter().info("Updated " + derbyRows + " rows in derby.");
@@ -722,7 +722,7 @@ public void performDelete() {
       if (hasDerbyServer) {
         dConn = derbyTestUtils.getDerbyConnection();
         if(stmt.toUpperCase().contains("SELECT"))
-          getAndExecuteSelect(conn,stmt,true);
+          getAndExecuteSelect(dConn,stmt,true);
         Log.getLogWriter().info("Executing " + stmt + " on derby.");
         int derbyRows = dConn.createStatement().executeUpdate(stmt);
         Log.getLogWriter().info("Deleted " + derbyRows + " rows in derby.");

From 26fa3dc4bf27d7dc2eaf66efe9cf969411a4e2aa Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Fri, 9 Feb 2018 15:01:16 +0530
Subject: [PATCH 08/33] - Changes related to SNAP-2208 : Use --conf to set the
 snappydata.connection property instead of sending it as an argument and setting
 it in SparkContext.

---
 .../test/java/io/snappydata/hydra/cluster/SnappyTest.java    | 3 ++-
 .../scala/io/snappydata/hydra/AirlineDataSparkJobApp.scala   | 4 +---
 .../scala/io/snappydata/hydra/InstallJarTestSparkApp.scala   | 4 +---
 .../io/snappydata/hydra/ct/CreateAndLoadCTTablesApp.scala    | 4 +---
 .../scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala  | 4 +---
 .../hydra/northwind/CreateAndLoadNWTablesSparkApp.scala      | 4 +---
 .../hydra/northwind/EvictionTestNWQueriesApp.scala           | 4 +---
 .../hydra/northwind/NWSparkTablesAndQueriesApp.scala         | 4 +---
 .../scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala | 4 +---
 .../io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala | 4 +---
 .../hydra/security/CreateAndLoadTablesSparkApp.scala         | 5 +----
 .../io/snappydata/hydra/testDMLOps/ValidateDMLOpApp.scala    | 4 +---
 12 files changed, 13 insertions(+), 35 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index 8ac77c7de8..f59d67f839 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -2073,10 +2073,11 @@ public void executeSparkJob(Vector jobClassNames, String logFileName) {
             SnappyPrms.getExecutorMemory() + " " +
             " --jars " + getStoreTestsJar() +
             SnappyPrms.getSparkSubmitExtraPrms() + " " +
+            " --conf snappydata.connection=" + primaryLocatorHost + ":" + primaryLocatorPort +
             " --conf spark.executor.extraJavaOptions=-XX:+HeapDumpOnOutOfMemoryError" +
             " --conf spark.extraListeners=io.snappydata.hydra.SnappyCustomSparkListener" +
             " " + snappyTest.getUserAppJarLocation(userAppJar, jarPath) + " " +
-            userAppArgs + " " + primaryLocatorHost + ":" + primaryLocatorPort;
+            userAppArgs;
         Log.getLogWriter().info("spark-submit command is : " + command);
         log = new File(".");
         String dest = log.getCanonicalPath() + File.separator + logFileName;
diff --git a/dtests/src/test/scala/io/snappydata/hydra/AirlineDataSparkJobApp.scala b/dtests/src/test/scala/io/snappydata/hydra/AirlineDataSparkJobApp.scala
index 4113a9c86e..2cfbe15382 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/AirlineDataSparkJobApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/AirlineDataSparkJobApp.scala
@@ -29,10 +29,8 @@ object AirlineDataSparkJobApp {
 
   def main(args: Array[String]) {
     // scalastyle:off println
-    val connectionURL = args(args.length - 1)
     val conf = new SparkConf().
-        setAppName("Airline Data Application").
-        set("snappydata.connection", connectionURL)
+        setAppName("Airline Data Application")
     val sc = SparkContext.getOrCreate(conf)
     val snc = SnappyContext(sc)
     snc.sql("set spark.sql.shuffle.partitions=6")
diff --git a/dtests/src/test/scala/io/snappydata/hydra/InstallJarTestSparkApp.scala b/dtests/src/test/scala/io/snappydata/hydra/InstallJarTestSparkApp.scala
index 138ce6b765..99a63eb032 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/InstallJarTestSparkApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/InstallJarTestSparkApp.scala
@@ -31,10 +31,8 @@ object InstallJarTestSparkApp {
   val snc = SnappyContext(sc)
 
   def main(args: Array[String]): Unit = {
-    val connectionURL = args(args.length - 1)
     val conf = new SparkConf().
-        setAppName("InstallJarTest Application").
-        set("snappydata.connection", connectionURL)
+        setAppName("InstallJarTest Application")
     val sc = SparkContext.getOrCreate(conf)
     val snc = SnappyContext(sc)
     val threadID = Thread.currentThread().getId
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesApp.scala
index da2a09dc4d..0c7a3f2915 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesApp.scala
@@ -25,10 +25,8 @@ import org.apache.spark.{SparkContext, SparkConf}
 object CreateAndLoadCTTablesApp {
 
   def main(args: Array[String]) {
-    val connectionURL = args(args.length - 1)
     val conf = new SparkConf().
-        setAppName("CreateAndLoadCTTables Application").
-        set("snappydata.connection", connectionURL)
+        setAppName("CreateAndLoadCTTables Application")
     val sc = SparkContext.getOrCreate(conf)
     val snc = SnappyContext(sc)
     val dataFilesLocation = args(0)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
index 0076cdcbc1..9115e30d36 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
@@ -27,10 +27,8 @@ import org.apache.spark.{SparkConf, SparkContext}
 object ValidateCTQueriesApp {
 
   def main(args: Array[String]) {
-    val connectionURL = args(args.length - 1)
     val conf = new SparkConf().
-        setAppName("ValidateCTQueriesApp Application").
-        set("snappydata.connection", connectionURL)
+        setAppName("ValidateCTQueriesApp Application")
     val sc = SparkContext.getOrCreate(conf)
     val sqlContext = SQLContext.getOrCreate(sc)
     val snc = SnappyContext(sc)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/CreateAndLoadNWTablesSparkApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/CreateAndLoadNWTablesSparkApp.scala
index bfebacf13d..1daa342255 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/CreateAndLoadNWTablesSparkApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/CreateAndLoadNWTablesSparkApp.scala
@@ -24,10 +24,8 @@ import org.apache.spark.{SparkContext, SparkConf}
 object CreateAndLoadNWTablesSparkApp {
 
   def main(args: Array[String]) {
-    val connectionURL = args(args.length - 1)
     val conf = new SparkConf().
-        setAppName("CreateAndLoadNWTablesSpark Application").
-        set("snappydata.connection", connectionURL)
+        setAppName("CreateAndLoadNWTablesSpark Application")
     val sc = SparkContext.getOrCreate(conf)
     val snc = SnappyContext(sc)
     val dataFilesLocation = args(0)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/EvictionTestNWQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/EvictionTestNWQueriesApp.scala
index f0497d8e4e..9e876b3bd3 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/EvictionTestNWQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/EvictionTestNWQueriesApp.scala
@@ -24,10 +24,8 @@ import org.apache.spark.{SparkConf, SparkContext}
 object EvictionTestNWQueriesApp {
 
   def main(args: Array[String]) {
-    val connectionURL = args(args.length - 1)
     val conf = new SparkConf().
-        setAppName("EvictionTestNWQueriesApp Application").
-        set("snappydata.connection", connectionURL)
+        setAppName("EvictionTestNWQueriesApp Application")
     val sc = SparkContext.getOrCreate(conf)
     val snc = SnappyContext(sc)
     val tableType = args(0)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWSparkTablesAndQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWSparkTablesAndQueriesApp.scala
index 2d27d745f6..90cbe9ac5f 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWSparkTablesAndQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWSparkTablesAndQueriesApp.scala
@@ -24,10 +24,8 @@ import org.apache.spark.{SparkConf, SparkContext}
 object NWSparkTablesAndQueriesApp {
 
   def main(args: Array[String]) {
-    val connectionURL = args(args.length - 1)
     val conf = new SparkConf().
-        setAppName("NWSparkTablesAndQueriesApp Application").
-        set("snappydata.connection", connectionURL)
+        setAppName("NWSparkTablesAndQueriesApp Application")
     val sc = SparkContext.getOrCreate(conf)
     val sqlContext = SQLContext.getOrCreate(sc)
     val snc = SnappyContext(sc)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala
index 4d77dde4b5..5847090472 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestSparkApp.scala
@@ -27,10 +27,8 @@ import org.apache.spark.{SparkConf, SparkContext}
 object NWTestSparkApp {
 
   def main(args: Array[String]) {
-    val connectionURL = args(args.length - 1)
     val conf = new SparkConf().
-        setAppName("NWTestSpark Application").
-        set("snappydata.connection", connectionURL)
+        setAppName("NWTestSpark Application")
     val sc = SparkContext.getOrCreate(conf)
     val sqlContext = SQLContext.getOrCreate(sc)
     val snc = SnappyContext(sc)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
index 1b67a1306e..c845ef41b1 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
@@ -26,10 +26,8 @@ import org.apache.spark.{SparkConf, SparkContext}
 object ValidateNWQueriesApp {
 
   def main(args: Array[String]) {
-    val connectionURL = args(args.length - 1)
     val conf = new SparkConf().
-        setAppName("ValidateNWQueries Application").
-        set("snappydata.connection", connectionURL)
+        setAppName("ValidateNWQueries Application")
     val sc = SparkContext.getOrCreate(conf)
     val sqlContext = SQLContext.getOrCreate(sc)
     val snc = SnappyContext(sc)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/security/CreateAndLoadTablesSparkApp.scala b/dtests/src/test/scala/io/snappydata/hydra/security/CreateAndLoadTablesSparkApp.scala
index b4f02a6265..ca93887fec 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/security/CreateAndLoadTablesSparkApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/security/CreateAndLoadTablesSparkApp.scala
@@ -28,11 +28,8 @@ object CreateAndLoadTablesSparkApp {
   def main(args: Array[String]) {
     // scalastyle:off println
     Thread.sleep(60000L)
-    val connectionURL = args(args.length - 1)
-    println("The connection url is " + connectionURL)
     val conf = new SparkConf().
-        setAppName("CreateAndLoadNWTablesSpark Application").
-        set("snappydata.connection", connectionURL)
+        setAppName("CreateAndLoadNWTablesSpark Application")
     val sc = SparkContext.getOrCreate(conf)
     val snc = SnappyContext(sc)
 
diff --git a/dtests/src/test/scala/io/snappydata/hydra/testDMLOps/ValidateDMLOpApp.scala b/dtests/src/test/scala/io/snappydata/hydra/testDMLOps/ValidateDMLOpApp.scala
index 872c5ec9b8..e814679c16 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/testDMLOps/ValidateDMLOpApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/testDMLOps/ValidateDMLOpApp.scala
@@ -25,10 +25,8 @@ import org.apache.spark.{SparkConf, SparkContext}
 object ValidateDMLOpApp {
 
   def main(args: Array[String]) {
-    val connectionURL = args(args.length - 1)
     val conf = new SparkConf().
-        setAppName("DMLOp Application").
-        set("snappydata.connection", connectionURL)
+        setAppName("DMLOp Application")
     val sc = SparkContext.getOrCreate(conf)
     val snc = SnappyContext(sc)
     def getCurrentDirectory = new java.io.File(".").getCanonicalPath

From 676fbe2387752f38335b2608b3e41f7231c456d6 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Fri, 9 Feb 2018 18:10:11 +0530
Subject: [PATCH 09/33] - Changes for SNAP-2210 : Changing the test
 configuration to have 2 locators instead of 3.

---
 .../snappydata/hydra/alterTable/alterTable.bt | 10 +++---
 .../hydra/cluster/startLongRunningCluster.bt  |  2 +-
 .../snappydata/hydra/distIndex/distIndex.bt   |  4 +--
 .../snappydata/hydra/installJar/installJar.bt | 10 +++---
 .../snappydata/hydra/longRun/longRunTest.bt   |  6 ++--
 .../snappydata/hydra/northwind/northWind.bt   | 22 ++++++------
 .../snappydata/hydra/northwind/northWindHA.bt | 36 +++++++++----------
 .../hydra/northwind/northWindUseCase.bt       |  2 +-
 .../clusterRestartWithPersistentRecovery.bt   |  2 +-
 ...rRestartWithRandomOrderForServerStartUp.bt |  2 +-
 .../io/snappydata/hydra/startUp/startUp.bt    | 18 +++++-----
 .../hydra/InstallJarTestSparkApp.scala        |  4 ---
 12 files changed, 57 insertions(+), 61 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/alterTable/alterTable.bt b/dtests/src/test/java/io/snappydata/hydra/alterTable/alterTable.bt
index 0aa4ccb900..c4614d2217 100644
--- a/dtests/src/test/java/io/snappydata/hydra/alterTable/alterTable.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/alterTable/alterTable.bt
@@ -1,7 +1,7 @@
 io/snappydata/hydra/alterTable/nwAlterTablesTestUsingSnappyJob.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="PartitionedRow","ReplicatedRow"
@@ -13,7 +13,7 @@ io/snappydata/hydra/alterTable/nwAlterTablesTestUsingSnappyJob.conf
 io/snappydata/hydra/alterTable/nwAlterPartitionedRowTablesTestUsingSnappyShell.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="PartitionedRow"
@@ -24,7 +24,7 @@ io/snappydata/hydra/alterTable/nwAlterPartitionedRowTablesTestUsingSnappyShell.c
 io/snappydata/hydra/alterTable/nwAlterReplicatedRowTablesTestUsingSnappyShell.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="ReplicatedRow"
@@ -35,7 +35,7 @@ io/snappydata/hydra/alterTable/nwAlterReplicatedRowTablesTestUsingSnappyShell.co
 io/snappydata/hydra/alterTable/nwAlterPartitionedRowTablesTestUsingJDBCClient.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="PartitionedRow"
@@ -46,7 +46,7 @@ io/snappydata/hydra/alterTable/nwAlterPartitionedRowTablesTestUsingJDBCClient.co
 io/snappydata/hydra/alterTable/nwAlterReplicatedRowTablesTestUsingJDBCClient.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="ReplicatedRow"
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/startLongRunningCluster.bt b/dtests/src/test/java/io/snappydata/hydra/cluster/startLongRunningCluster.bt
index 68f25d8a52..5c37ac7869 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/startLongRunningCluster.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/startLongRunningCluster.bt
@@ -1,6 +1,6 @@
 io/snappydata/hydra/cluster/startLongRunningCluster.conf
   A=snappyStore snappyStoreHosts=10 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=10 workerVMsPerHost=1 workerThreadsPerVM=1
   numVMsToStop=1
\ No newline at end of file
diff --git a/dtests/src/test/java/io/snappydata/hydra/distIndex/distIndex.bt b/dtests/src/test/java/io/snappydata/hydra/distIndex/distIndex.bt
index b5f92ae67c..e795f86e55 100644
--- a/dtests/src/test/java/io/snappydata/hydra/distIndex/distIndex.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/distIndex/distIndex.bt
@@ -1,7 +1,7 @@
 io/snappydata/hydra/distIndex/distIndexResultValidation.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   redundantCopies=1
   buckets_Order_Lineitem=113
   buckets_Cust_Part_PartSupp=113
@@ -16,7 +16,7 @@ io/snappydata/hydra/distIndex/distIndexResultValidation.conf
 io/snappydata/hydra/distIndex/distIndexPerformance.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   redundantCopies=1
   buckets_Order_Lineitem=113
   buckets_Cust_Part_PartSupp=113
diff --git a/dtests/src/test/java/io/snappydata/hydra/installJar/installJar.bt b/dtests/src/test/java/io/snappydata/hydra/installJar/installJar.bt
index 029ac52d39..bd60acacf3 100644
--- a/dtests/src/test/java/io/snappydata/hydra/installJar/installJar.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/installJar/installJar.bt
@@ -1,25 +1,25 @@
 io/snappydata/hydra/installJar/dynamicJarLoadingUsingSnappyJob.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
 
 io/snappydata/hydra/installJar/dynamicJarLoadingUsingSnappyShell.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
 
 io/snappydata/hydra/installJar/dynamicJarLoadingUsingSparkApp.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
 
 io/snappydata/hydra/installJar/dynamicJarLoadingUsingSnappyJobsWithIdenticalNames.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
 
 io/snappydata/hydra/installJar/verifyCleanupAfterSnappyJobExecution.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
\ No newline at end of file
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
\ No newline at end of file
diff --git a/dtests/src/test/java/io/snappydata/hydra/longRun/longRunTest.bt b/dtests/src/test/java/io/snappydata/hydra/longRun/longRunTest.bt
index 4a3e6dcbc1..2875106516 100644
--- a/dtests/src/test/java/io/snappydata/hydra/longRun/longRunTest.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/longRun/longRunTest.bt
@@ -7,7 +7,7 @@
 io/snappydata/hydra/longRun/longRunTestEmbededMode.conf
   A=snappyStore snappyStoreHosts=4 snappyStoreVMsPerHost=2 snappyStoreThreadsPerVM=4
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   redundantCopies=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Colocated"
@@ -17,7 +17,7 @@ io/snappydata/hydra/longRun/longRunTestEmbededMode.conf
 io/snappydata/hydra/longRun/longRunTestEmbededModeWithHA.conf
   A=snappyStore snappyStoreHosts=4 snappyStoreVMsPerHost=2 snappyStoreThreadsPerVM=4
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   redundantCopies=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Colocated"
@@ -27,7 +27,7 @@ io/snappydata/hydra/longRun/longRunTestEmbededModeWithHA.conf
 io/snappydata/hydra/longRun/longRunTestSplitMode.conf
   A=snappyStore snappyStoreHosts=4 snappyStoreVMsPerHost=2 snappyStoreThreadsPerVM=4
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   redundantCopies=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
diff --git a/dtests/src/test/java/io/snappydata/hydra/northwind/northWind.bt b/dtests/src/test/java/io/snappydata/hydra/northwind/northWind.bt
index db71b11497..c2b1b2e99e 100644
--- a/dtests/src/test/java/io/snappydata/hydra/northwind/northWind.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/northwind/northWind.bt
@@ -1,7 +1,7 @@
 io/snappydata/hydra/northwind/nwReplicatedRowTablesTest.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="ReplicatedRow"
@@ -13,7 +13,7 @@ io/snappydata/hydra/northwind/nwReplicatedRowTablesTest.conf
 io/snappydata/hydra/northwind/nwPartitionedRowTablesTest.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="PartitionedRow"
@@ -25,7 +25,7 @@ io/snappydata/hydra/northwind/nwPartitionedRowTablesTest.conf
 io/snappydata/hydra/northwind/nwColumnTablesTest.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Column"
@@ -37,7 +37,7 @@ io/snappydata/hydra/northwind/nwColumnTablesTest.conf
 io/snappydata/hydra/northwind/nwColocatedTablesTest.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Colocated"
@@ -49,7 +49,7 @@ io/snappydata/hydra/northwind/nwColocatedTablesTest.conf
 io/snappydata/hydra/northwind/nwPersistentReplicatedRowTablesTest.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="ReplicatedRow"
@@ -60,7 +60,7 @@ io/snappydata/hydra/northwind/nwPersistentReplicatedRowTablesTest.conf
 io/snappydata/hydra/northwind/nwPersistentPartitionedRowTablesTest.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="PartitionedRow"
@@ -71,7 +71,7 @@ io/snappydata/hydra/northwind/nwPersistentPartitionedRowTablesTest.conf
 io/snappydata/hydra/northwind/nwPersistentColumnTablesTest.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Column"
@@ -82,7 +82,7 @@ io/snappydata/hydra/northwind/nwPersistentColumnTablesTest.conf
 io/snappydata/hydra/northwind/nwPersistentColocatedTablesTest.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Colocated"
@@ -93,7 +93,7 @@ io/snappydata/hydra/northwind/nwPersistentColocatedTablesTest.conf
 io/snappydata/hydra/northwind/nwPartitionedRowTablesWithEvictionTest.conf
   A=snappyStore snappyStoreHosts=2 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="PartitionedRow"
@@ -104,7 +104,7 @@ io/snappydata/hydra/northwind/nwPartitionedRowTablesWithEvictionTest.conf
 io/snappydata/hydra/northwind/nwPartitionedRowTablesWithEvictionOverflowTest.conf
   A=snappyStore snappyStoreHosts=2 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="PartitionedRow"
@@ -115,7 +115,7 @@ io/snappydata/hydra/northwind/nwPartitionedRowTablesWithEvictionOverflowTest.con
 io/snappydata/hydra/northwind/nwPersistentPartitionedRowTablesWithEvictionOverflowTest.conf
   A=snappyStore snappyStoreHosts=2 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="PartitionedRow"
diff --git a/dtests/src/test/java/io/snappydata/hydra/northwind/northWindHA.bt b/dtests/src/test/java/io/snappydata/hydra/northwind/northWindHA.bt
index 6512a8e3ba..914c6260d4 100644
--- a/dtests/src/test/java/io/snappydata/hydra/northwind/northWindHA.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/northwind/northWindHA.bt
@@ -1,7 +1,7 @@
 io/snappydata/hydra/northwind/nwReplicatedRowTablesTestWithServerHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="ReplicatedRow"
@@ -14,7 +14,7 @@ io/snappydata/hydra/northwind/nwReplicatedRowTablesTestWithServerHA.conf
 io/snappydata/hydra/northwind/nwPartitionedRowTablesTestWithServerHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="PartitionedRow"
@@ -27,7 +27,7 @@ io/snappydata/hydra/northwind/nwPartitionedRowTablesTestWithServerHA.conf
 io/snappydata/hydra/northwind/nwColumnTablesTestWithServerHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Column"
@@ -40,7 +40,7 @@ io/snappydata/hydra/northwind/nwColumnTablesTestWithServerHA.conf
 io/snappydata/hydra/northwind/nwColocatedTablesTestWithServerHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Colocated"
@@ -53,7 +53,7 @@ io/snappydata/hydra/northwind/nwColocatedTablesTestWithServerHA.conf
 io/snappydata/hydra/northwind/nwReplicatedRowTablesTestWithLeadHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="ReplicatedRow"
@@ -66,7 +66,7 @@ io/snappydata/hydra/northwind/nwReplicatedRowTablesTestWithLeadHA.conf
 io/snappydata/hydra/northwind/nwPartitionedRowTablesTestWithLeadHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="PartitionedRow"
@@ -79,7 +79,7 @@ io/snappydata/hydra/northwind/nwPartitionedRowTablesTestWithLeadHA.conf
 io/snappydata/hydra/northwind/nwColumnTablesTestWithLeadHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Column"
@@ -92,7 +92,7 @@ io/snappydata/hydra/northwind/nwColumnTablesTestWithLeadHA.conf
 io/snappydata/hydra/northwind/nwColocatedTablesTestWithLeadHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Colocated"
@@ -105,7 +105,7 @@ io/snappydata/hydra/northwind/nwColocatedTablesTestWithLeadHA.conf
 io/snappydata/hydra/northwind/nwPersistentReplicatedRowTablesTestWithServerHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="ReplicatedRow"
@@ -117,7 +117,7 @@ io/snappydata/hydra/northwind/nwPersistentReplicatedRowTablesTestWithServerHA.co
 io/snappydata/hydra/northwind/nwPersistentReplicatedRowTablesTestWithLeadHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="ReplicatedRow"
@@ -129,7 +129,7 @@ io/snappydata/hydra/northwind/nwPersistentReplicatedRowTablesTestWithLeadHA.conf
 io/snappydata/hydra/northwind/nwPersistentPartitionedRowTablesTestWithServerHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="PartitionedRow"
@@ -141,7 +141,7 @@ io/snappydata/hydra/northwind/nwPersistentPartitionedRowTablesTestWithServerHA.c
 io/snappydata/hydra/northwind/nwPersistentPartitionedRowTablesTestWithLeadHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="PartitionedRow"
@@ -153,7 +153,7 @@ io/snappydata/hydra/northwind/nwPersistentPartitionedRowTablesTestWithLeadHA.con
 io/snappydata/hydra/northwind/nwPersistentColumnTablesTestWithServerHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Column"
@@ -165,7 +165,7 @@ io/snappydata/hydra/northwind/nwPersistentColumnTablesTestWithServerHA.conf
 io/snappydata/hydra/northwind/nwPersistentColumnTablesTestWithLeadHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Column"
@@ -177,7 +177,7 @@ io/snappydata/hydra/northwind/nwPersistentColumnTablesTestWithLeadHA.conf
 io/snappydata/hydra/northwind/nwPersistentColocatedTablesTestWithServerHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Colocated"
@@ -189,7 +189,7 @@ io/snappydata/hydra/northwind/nwPersistentColocatedTablesTestWithServerHA.conf
 io/snappydata/hydra/northwind/nwPersistentColocatedTablesTestWithLeadHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Colocated"
@@ -201,7 +201,7 @@ io/snappydata/hydra/northwind/nwPersistentColocatedTablesTestWithLeadHA.conf
 io/snappydata/hydra/northwind/nwReplicatedRowTablesTestWithLocatorHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="ReplicatedRow"
@@ -277,7 +277,7 @@ io/snappydata/hydra/northwind/nwPersistentPartitionedRowTablesTestWithLocatorHA.
 io/snappydata/hydra/northwind/nwPersistentColumnTablesTestWithLocatorHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
   tableType="Column"
diff --git a/dtests/src/test/java/io/snappydata/hydra/northwind/northWindUseCase.bt b/dtests/src/test/java/io/snappydata/hydra/northwind/northWindUseCase.bt
index 088f0db31c..6f6dc0e337 100644
--- a/dtests/src/test/java/io/snappydata/hydra/northwind/northWindUseCase.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/northwind/northWindUseCase.bt
@@ -1,6 +1,6 @@
 io/snappydata/hydra/northwind/northwindUseCase.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="$GEMFIRE/../../../tests/common/src/main/resources/northwind/"
diff --git a/dtests/src/test/java/io/snappydata/hydra/recovery/clusterRestartWithPersistentRecovery.bt b/dtests/src/test/java/io/snappydata/hydra/recovery/clusterRestartWithPersistentRecovery.bt
index ac8ebf7cef..795e212813 100644
--- a/dtests/src/test/java/io/snappydata/hydra/recovery/clusterRestartWithPersistentRecovery.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/recovery/clusterRestartWithPersistentRecovery.bt
@@ -1,7 +1,7 @@
 io/snappydata/hydra/recovery/clusterRestartWithPersistentRecovery.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=1 workerVMsPerHost=1 workerThreadsPerVM=1
   dataLocation="/export/shared/QA_DATA/RCORE_DATA/"
 
diff --git a/dtests/src/test/java/io/snappydata/hydra/startUp/nwTestWithAllServerHA_meanKill_clusterRestartWithRandomOrderForServerStartUp.bt b/dtests/src/test/java/io/snappydata/hydra/startUp/nwTestWithAllServerHA_meanKill_clusterRestartWithRandomOrderForServerStartUp.bt
index 20a2b4652a..121714625e 100644
--- a/dtests/src/test/java/io/snappydata/hydra/startUp/nwTestWithAllServerHA_meanKill_clusterRestartWithRandomOrderForServerStartUp.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/startUp/nwTestWithAllServerHA_meanKill_clusterRestartWithRandomOrderForServerStartUp.bt
@@ -1,7 +1,7 @@
 io/snappydata/hydra/startUp/nwTestWithAllServerHA_meanKill_clusterRestartWithRandomOrderForServerStartUp.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="/export/shared/QA_DATA/northWind_1GB/"
   tableType="Column"
diff --git a/dtests/src/test/java/io/snappydata/hydra/startUp/startUp.bt b/dtests/src/test/java/io/snappydata/hydra/startUp/startUp.bt
index f3f052a5aa..4002036eab 100644
--- a/dtests/src/test/java/io/snappydata/hydra/startUp/startUp.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/startUp/startUp.bt
@@ -1,7 +1,7 @@
 io/snappydata/hydra/startUp/nwTestWithServerHA_clusterRestart.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="/export/shared/QA_DATA/northWind_1GB/"
   tableType="Column","PartitionedRow","ReplicatedRow","Colocated"
@@ -14,7 +14,7 @@ io/snappydata/hydra/startUp/nwTestWithServerHA_clusterRestart.conf
 io/snappydata/hydra/startUp/nwTestWithLeadHA_clusterRestart.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="/export/shared/QA_DATA/northWind_1GB/"
   tableType="Column","PartitionedRow","ReplicatedRow","Colocated"
@@ -66,7 +66,7 @@ io/snappydata/hydra/startUp/nwColumnTablesTestWithDdlOpAfterLocatorStop_ClusterR
 io/snappydata/hydra/startUp/nwColumnTablesTestWithDdlOpDuringLocatorHA.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="/export/shared/QA_DATA/northWind_1GB/"
   tableType="Column"
@@ -91,7 +91,7 @@ io/snappydata/hydra/startUp/nwPersistentColumnTablesTestWithDdlOpDuringLocatorHA
 io/snappydata/hydra/startUp/nwPersistentColumnTablesTestWithDdlOpAfterLocatorStop_ClusterRestart.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="/export/shared/QA_DATA/northWind_1GB/"
   tableType="Column"
@@ -103,7 +103,7 @@ io/snappydata/hydra/startUp/nwPersistentColumnTablesTestWithDdlOpAfterLocatorSto
 io/snappydata/hydra/startUp/nwPersistentColumnTablesTestWithDdlOpAfterAllLocatorStop_ClusterRestart.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="/export/shared/QA_DATA/northWind_1GB/"
   tableType="Column"
@@ -115,7 +115,7 @@ io/snappydata/hydra/startUp/nwPersistentColumnTablesTestWithDdlOpAfterAllLocator
 io/snappydata/hydra/startUp/executeAndVerifyOpsAfterAllLocatorsStopAndClusterRestart.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   dataFilesLocation="/export/shared/QA_DATA/northWind_1GB/"
   tableType="Column"
@@ -127,7 +127,7 @@ io/snappydata/hydra/startUp/executeAndVerifyOpsAfterAllLocatorsStopAndClusterRes
 io/snappydata/hydra/startUp/nwTestWithServerHAWithRebalance_clusterRestart.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   E=newStore newStoreHosts=1 newStoreVMsPerHost=1 newStoreThreadsPerVM=1
   dataFilesLocation="/export/shared/QA_DATA/northWind_1GB/"
@@ -140,7 +140,7 @@ io/snappydata/hydra/startUp/nwTestWithServerHAWithRebalance_clusterRestart.conf
 io/snappydata/hydra/startUp/nwTestWithOpsDuringServerHA_clusterRestart.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   E=newStore newStoreHosts=1 newStoreVMsPerHost=1 newStoreThreadsPerVM=1
   dataFilesLocation="/export/shared/QA_DATA/northWind_1GB/"
@@ -153,7 +153,7 @@ io/snappydata/hydra/startUp/nwTestWithOpsDuringServerHA_clusterRestart.conf
 io/snappydata/hydra/startUp/nwTestWithNewServerRebalance_clusterRestart.conf
   A=snappyStore snappyStoreHosts=3 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=1
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
-  C=locator locatorHosts=3 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  C=locator locatorHosts=2 locatorVMsPerHost=1 locatorThreadsPerVM=1
   D=worker workerHosts=3 workerVMsPerHost=1 workerThreadsPerVM=1
   E=newStore newStoreHosts=1 newStoreVMsPerHost=1 newStoreThreadsPerVM=1
   dataFilesLocation="/export/shared/QA_DATA/northWind_1GB/"
diff --git a/dtests/src/test/scala/io/snappydata/hydra/InstallJarTestSparkApp.scala b/dtests/src/test/scala/io/snappydata/hydra/InstallJarTestSparkApp.scala
index 99a63eb032..e52f068f9c 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/InstallJarTestSparkApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/InstallJarTestSparkApp.scala
@@ -25,10 +25,6 @@ import org.apache.spark.{SparkConf, SparkContext}
 import scala.util.{Failure, Success, Try}
 
 object InstallJarTestSparkApp {
-  val conf = new SparkConf().
-      setAppName("InstallJarTest Application")
-  val sc = new SparkContext(conf)
-  val snc = SnappyContext(sc)
 
   def main(args: Array[String]): Unit = {
     val conf = new SparkConf().

From 653db9eb37b44741749ab3439504db7643020ea6 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Tue, 3 Apr 2018 15:01:11 +0530
Subject: [PATCH 10/33] - Support for having user specified connection
 properties - Fix for test issue in snapshot isolation test

---
 .../connectionPool/HikariConnectionPool.java  | 13 +++++---
 .../SnappyConnectionPoolPrms.java             | 17 ++++++++++
 .../connectionPool/TomcatConnectionPool.java  |  8 +++++
 .../hydra/northwind/local.northWind.conf      |  9 +++---
 .../SnapshotIsolationTest.java                |  9 ++++--
 .../io/snappydata/hydra/SnappyTestUtils.scala | 32 +++++++++----------
 6 files changed, 60 insertions(+), 28 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/connectionPool/HikariConnectionPool.java b/dtests/src/test/java/io/snappydata/hydra/connectionPool/HikariConnectionPool.java
index 8f131fa22b..fc0a025f6e 100644
--- a/dtests/src/test/java/io/snappydata/hydra/connectionPool/HikariConnectionPool.java
+++ b/dtests/src/test/java/io/snappydata/hydra/connectionPool/HikariConnectionPool.java
@@ -18,12 +18,12 @@
 package io.snappydata.hydra.connectionPool;
 
 import java.sql.Connection;
+import java.util.Properties;
 
 import com.zaxxer.hikari.HikariConfig;
 import com.zaxxer.hikari.HikariDataSource;
 import hydra.Log;
 import io.snappydata.hydra.cluster.SnappyTest;
-import org.apache.tomcat.jdbc.pool.PoolProperties;
 
 public class HikariConnectionPool {
 
@@ -34,9 +34,14 @@ private HikariConnectionPool() {
     Log.getLogWriter().info(" Creating instance of HikariConnectionPool");
     String url = SnappyConnectionPoolPrms.getUrl() + SnappyTest.validateLocatorEndpointData().get
         (0);
-    PoolProperties p = new PoolProperties();
-
-    HikariConfig jdbcConfig = new HikariConfig();
+    Properties connProperties = new Properties();
+    //user connection properties
+    String[] strArr = SnappyConnectionPoolPrms.getConnProperties();
+    for(int i = 0; i < strArr.length; i++) {
+      String prop[] = strArr[i].split("=");
+      connProperties.setProperty(prop[0],prop[1]);
+    }
+    HikariConfig jdbcConfig = new HikariConfig(connProperties);
     jdbcConfig.setPoolName(SnappyConnectionPoolPrms.getPoolName());
     jdbcConfig.setMaximumPoolSize(SnappyConnectionPoolPrms.getInitialSize());
     jdbcConfig.setJdbcUrl(url);
diff --git a/dtests/src/test/java/io/snappydata/hydra/connectionPool/SnappyConnectionPoolPrms.java b/dtests/src/test/java/io/snappydata/hydra/connectionPool/SnappyConnectionPoolPrms.java
index b890e5e1f5..dea3d4ba76 100644
--- a/dtests/src/test/java/io/snappydata/hydra/connectionPool/SnappyConnectionPoolPrms.java
+++ b/dtests/src/test/java/io/snappydata/hydra/connectionPool/SnappyConnectionPoolPrms.java
@@ -17,6 +17,8 @@
 
 package io.snappydata.hydra.connectionPool;
 
+import java.util.Vector;
+
 import hydra.BasePrms;
 import hydra.TestConfig;
 
@@ -38,6 +40,9 @@ public class SnappyConnectionPoolPrms extends BasePrms {
 
   public static Long poolName;
 
+  /* space separated connection properties to be set while creating a connection */
+  public static Long connectionProperties;
+
   public static String getString(Long key){
     return TestConfig.tasktab().stringAt(key, TestConfig.tab().stringAt(key, null));
   }
@@ -87,6 +92,18 @@ else if (connPoolType.equalsIgnoreCase("Tomcat"))
       return -1; //don't use connection pool
   }
 
+  public static String[] getConnProperties() {
+    Long key = connectionProperties;
+    Vector connProperties = TestConfig.tasktab().vecAt(key, TestConfig.tab().vecAt(key, null));
+    if (connProperties == null)
+      return new String[0];
+    String[] strArr = new String[connProperties.size()];
+    for (int i = 0; i < connProperties.size(); i++) {
+      strArr[i] = (String)connProperties.get(i);
+    }
+    return strArr;
+  }
+
   static {
     BasePrms.setValues(SnappyConnectionPoolPrms.class);
   }
diff --git a/dtests/src/test/java/io/snappydata/hydra/connectionPool/TomcatConnectionPool.java b/dtests/src/test/java/io/snappydata/hydra/connectionPool/TomcatConnectionPool.java
index dc5ec2613a..68029a5caf 100644
--- a/dtests/src/test/java/io/snappydata/hydra/connectionPool/TomcatConnectionPool.java
+++ b/dtests/src/test/java/io/snappydata/hydra/connectionPool/TomcatConnectionPool.java
@@ -41,6 +41,14 @@ private TomcatConnectionPool() {
     p.setPassword(SnappyConnectionPoolPrms.getPassword());
     p.setInitialSize(SnappyConnectionPoolPrms.getInitialSize());
     p.setMaxWait(SnappyConnectionPoolPrms.getMaxWait());
+    //user connection properties
+    String[] connProperties = SnappyConnectionPoolPrms.getConnProperties();
+    String connProp = "";
+    for(int i = 0; i < connProperties.length; i++)
+      connProp = connProp + connProperties[i] + ";";
+    int lastIndex = connProp.lastIndexOf(";");
+    if (lastIndex != -1) connProp = connProp.substring(0,lastIndex);
+    p.setConnectionProperties(connProp);
     datasource = new DataSource();
     datasource.setPoolProperties(p);
   }
diff --git a/dtests/src/test/java/io/snappydata/hydra/northwind/local.northWind.conf b/dtests/src/test/java/io/snappydata/hydra/northwind/local.northWind.conf
index 3feb1a63cd..b9e84b0884 100644
--- a/dtests/src/test/java/io/snappydata/hydra/northwind/local.northWind.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/northwind/local.northWind.conf
@@ -1,12 +1,11 @@
-
+/*
 hydra.HostPrms-hostNames =
 fcn "hydra.TestConfigFcns.pool(\"<host1> <host2> <host3>\", ${snappyStoreHosts})" ncf
-fcn "hydra.TestConfigFcns.pool(\"<host4> <host5>\", ${leadHosts})" ncf
-fcn "hydra.TestConfigFcns.pool(\"<host4> <host4> <host5>\", ${locatorHosts})" ncf
+fcn "hydra.TestConfigFcns.pool(\"<host1> <host2>\", ${leadHosts})" ncf
+fcn "hydra.TestConfigFcns.pool(\"<host1> <host2> <host3>\", ${locatorHosts})" ncf
 fcn "hydra.TestConfigFcns.pool(\"<host1> <host2> <host3>\", ${workerHosts})" ncf
 ;
-
-
+*/
 io.snappydata.hydra.cluster.SnappyPrms-serverLauncherProps = " -heap-size=4g -memory-size=8g -J-XX:+UnlockDiagnosticVMOptions  -J-XX:ParGCCardsPerStrideChunk=32768 -member-timeout=60000 -J-Dp2p.socket_timeout=60000 ";
 io.snappydata.hydra.cluster.SnappyPrms-locatorLauncherProps = " -member-timeout=60000 -J-Dp2p.socket_timeout=60000 ";
 
diff --git a/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/SnapshotIsolationTest.java b/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/SnapshotIsolationTest.java
index 9835f78da7..23c75f8749 100644
--- a/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/SnapshotIsolationTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/SnapshotIsolationTest.java
@@ -783,10 +783,15 @@ public static void HydraTask_verifyResults() {
   public void verifyResults() {
     try {
       if (SnapshotIsolationDMLOpsBB.getBB().getSharedMap().containsKey("dmlThreads")) {
+        ArrayList<Integer> selectThreads = null;
         ArrayList<Integer> dmlthreads = (ArrayList<Integer>)SnapshotIsolationDMLOpsBB.getBB().getSharedMap().get("dmlThreads");
         for (int i : dmlthreads) {
-          if(hasDuplicateSchemas)
-            replayOpsInDerby(i + "_" + getMyTid());
+          if(hasDuplicateSchemas) {
+            if (SnapshotIsolationDMLOpsBB.getBB().getSharedMap().containsKey("selectThreads"))
+              selectThreads = (ArrayList<Integer>)SnapshotIsolationDMLOpsBB.getBB().getSharedMap().get("selectThreads");
+            for (int selectTid : (selectThreads == null ? new ArrayList<Integer>() : selectThreads))
+              replayOpsInDerby(i + "_" + selectTid);
+          }
           else
             replayOpsInDerby(String.valueOf(i));
         }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index f206320262..dbcf6c4341 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -23,7 +23,6 @@ import org.apache.spark.sql.catalyst.encoders.RowEncoder
 
 import scala.io.Source
 
-
 object SnappyTestUtils {
 
   var validateFullResultSet: Boolean = false;
@@ -32,8 +31,7 @@ object SnappyTestUtils {
 
   /*
   Executes the join query, matches only the full result with expected result, returns false if the
-  query
-  validation has failed.
+  query validation has failed.
   */
   def assertJoin(snc: SnappyContext, sqlString: String, queryNum: String, pw: PrintWriter,
       sqlContext: SQLContext): Boolean = {
@@ -61,12 +59,13 @@ object SnappyTestUtils {
   /*
    Executes the query, matches only the full resultSet with expected result, returns false if the
    query validation has failed.
- */
+   */
   def assertQuery(snc: SnappyContext, sqlString: String, queryNum: String,
       pw: PrintWriter, sqlContext: SQLContext): Boolean = {
     numRowsValidation = false
     assertQuery(snc, sqlString, 0, queryNum, pw, sqlContext)
   }
+
   /*
    Executes the query, matches the result with expected result, returns false if the query
    validation has failed.
@@ -88,7 +87,7 @@ object SnappyTestUtils {
       }
       pw.flush()
     }
-    var fullRSValidationFailed = false
+    var fullRSValidationFailed: Boolean = false
     if (validateFullResultSet) {
 
       val snappyQueryFileName = s"Snappy_${queryNum}"
@@ -126,19 +125,17 @@ object SnappyTestUtils {
       } catch {
         case ex: Exception => {
           fullRSValidationFailed = true
-          pw.println(s"Full resultSet validation failed for ${queryNum} with following " +
-              s"exception:\n")
+          pw.println(s"Full resultSet validation for ${queryNum} got the following exception:\n")
           ex.printStackTrace(pw)
         }
       }
       pw.flush()
     }
-
     if (validationFailed) {
-      pw.println(s"\nNumRows validation failed for query ${queryNum} on {$tableType} table.")
+      pw.println(s"\nNumRows validation failed for query ${queryNum} on ${tableType} table.")
     }
     if(fullRSValidationFailed){
-      pw.println(s"\nFull resultset validation failed for query ${queryNum} on {$tableType} table.")
+      pw.println(s"\nFull resultset validation failed for query ${queryNum} on ${tableType} table.")
       validationFailed = true
     }
     pw.flush()
@@ -162,7 +159,7 @@ object SnappyTestUtils {
 
   /*
    Writes the query resultset to a csv file.
- */
+   */
   def writeToFile(df: DataFrame, dest: String, snc: SnappyContext): Unit = {
     import snc.implicits._
     df.map(dataTypeConverter)(RowEncoder(df.schema))
@@ -182,8 +179,8 @@ object SnappyTestUtils {
   }
 
   /*
-  Returns the path for the directory where the output of resultset of queries have been saved.
-  Creates a new directory, if not already existing
+   Returns the path for the directory where the output of resultset of queries have been saved.
+   Creates a new directory, if not already existing
    */
   def getQueryResultDir(dirName: String): String = {
     val log: File = new File(".")
@@ -208,8 +205,9 @@ object SnappyTestUtils {
     return queryResultDir.getAbsolutePath
   }
 
-  /* In case of round-off, there is a difference of .1 is snappy and spark. We can ignore such
-  differences
+  /*
+   In case of round-off, there is a difference of .1 in snappy and spark results. We can ignore
+   such differences
    */
   def isIgnorable(actualLine: String, expectedLine: String): Boolean = {
     var canBeIgnored = false
@@ -256,7 +254,7 @@ object SnappyTestUtils {
   }
 
   /*
-    If validation has failed for a query, add the query number to failedQueries String
+   If validation has failed for a query, add the query number to failedQueries String
    */
   def addToFailedQueryList(failedQueries: String, queryNum: String): String = {
     var str = failedQueries
@@ -272,7 +270,7 @@ object SnappyTestUtils {
   /*
    Performs full resultSet validation from snappy for a select query against results in a
    goldenFile.
- */
+   */
   def assertValidateFullResultSetFromGoldenFile(sqlString: String, queryNum: String, tableType:
   String, snc: SnappyContext, pw: PrintWriter, validationFailed: Boolean, goldenFileDest: String):
   Boolean = {

From 3d3622a1a6caf2e252d3bac613b5c7343da9d544 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Wed, 11 Apr 2018 13:04:44 +0530
Subject: [PATCH 11/33] - Fixing a test issue.

---
 .../test/scala/io/snappydata/hydra/SnappyTestUtils.scala   | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index ea873c5b52..6a700fba18 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -14,6 +14,7 @@
  * permissions and limitations under the License. See accompanying
  * LICENSE file.
  */
+
 package io.snappydata.hydra
 
 import java.io.{File, PrintWriter}
@@ -239,15 +240,13 @@ object SnappyTestUtils {
       if (actualLineSet.hasNext) {
         pw.println(s"Following ${actualLineSet.size} rows are unexpected in Snappy:")
         while (actualLineSet.hasNext) {
-          val txt: String = actualLineSet.next()
-          pw.println(s"$txt")
+          pw.println(s"${actualLineSet.next()}")
         }
       }
       if (expectedLineSet.hasNext) {
         pw.println(s"Following ${expectedLineSet.size} rows are missing in Snappy:")
         while (expectedLineSet.hasNext) {
-          val txt: String = actualLineSet.next()
-          pw.println(s"$txt")
+          pw.println(s"${expectedLineSet.next()}")
         }
       }
     }

From e81fa9bc2979e6f44cf0bb14868ebb56e0aa8658 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Mon, 3 Sep 2018 12:40:47 +0530
Subject: [PATCH 12/33] - Missed in the previous merge.

---
 .../ct/ValidateCTQueriesWithPutIntoJob.scala  | 29 ++++++++++-----
 .../hydra/northwind/NWTestUtil.scala          |  4 +-
 ...ateNWQueriesWithChangingConstantsApp.scala | 37 +++++++++++++------
 ...ateNWQueriesWithChangingConstantsJob.scala | 32 ++++++++++++----
 4 files changed, 72 insertions(+), 30 deletions(-)

diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesWithPutIntoJob.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesWithPutIntoJob.scala
index 756b482235..8f0cb2282c 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesWithPutIntoJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesWithPutIntoJob.scala
@@ -18,13 +18,15 @@
 package io.snappydata.hydra.ct
 
 import java.io.{File, FileOutputStream, PrintWriter}
+
 import scala.util.{Failure, Success, Try}
 
 import com.typesafe.config.Config
+import io.snappydata.hydra.SnappyTestUtils
 import util.TestException
 
 import org.apache.spark.SparkContext
-import org.apache.spark.sql.{SnappySession, SQLContext, SnappyJobValid, SnappyJobValidation, SnappySQLJob}
+import org.apache.spark.sql.{SQLContext, SnappyJobValid, SnappyJobValidation, SnappySQLJob, SnappySession}
 
 class ValidateCTQueriesWithPutIntoJob extends SnappySQLJob {
 
@@ -32,7 +34,8 @@ class ValidateCTQueriesWithPutIntoJob extends SnappySQLJob {
     def getCurrentDirectory = new java.io.File(".").getCanonicalPath
 
     val threadID = Thread.currentThread().getId
-    val outputFile = "ValidateCTQueriesJob_thread_" + threadID + "_" + System.currentTimeMillis + ".out"
+    val outputFile =
+      "ValidateCTQueriesJob_thread_" + threadID + "_" + System.currentTimeMillis + ".out"
     val pw = new PrintWriter(new FileOutputStream(new File(outputFile), true))
     val tableType = jobConfig.getString("tableType")
     val insertUniqueRecords = jobConfig.getString("insertUniqueRecords").toBoolean
@@ -44,27 +47,35 @@ class ValidateCTQueriesWithPutIntoJob extends SnappySQLJob {
       val dataFilesLocation = jobConfig.getString("dataFilesLocation")
       snc.setConf("dataFilesLocation", dataFilesLocation)
       CTQueries.snc = snc
+      // scalastyle:off println
       pw.println(s"Validation for $tableType tables started in snappy Job")
       val fullResultSetValidation: Boolean = jobConfig.getBoolean("fullResultSetValidation")
       val sc = SparkContext.getOrCreate()
       val sqlContext = SQLContext.getOrCreate(sc)
-      if (fullResultSetValidation)
+      if (fullResultSetValidation) {
         pw.println(s"Test will perform fullResultSetValidation")
-      else
+      }
+      else {
         pw.println(s"Test will not perform fullResultSetValidation")
+      }
       val startTime = System.currentTimeMillis
-      val failedQueries = CTTestUtil.executeQueries(snc, tableType, pw, fullResultSetValidation,
-        sqlContext, insertUniqueRecords, skipNumRowsValidation)
+      CTTestUtil.insertUniqueRecords = insertUniqueRecords
+      SnappyTestUtils.numRowsValidation = !skipNumRowsValidation
+      SnappyTestUtils.validateFullResultSet = fullResultSetValidation
+      val failedQueries = CTTestUtil.executeQueries(snc, tableType, pw, sqlContext)
       val endTime = System.currentTimeMillis
       val totalTime = (endTime - startTime) / 1000
       pw.println(s"Total time for execution is :: ${totalTime} seconds.")
       if (!failedQueries.isEmpty) {
-        println(s"Validation failed for ${tableType} for queries ${failedQueries}. See ${getCurrentDirectory}/${outputFile}")
+        println(s"Validation failed for ${tableType} for queries ${failedQueries}. " +
+            s"See ${getCurrentDirectory}/${outputFile}")
         pw.println(s"Validation failed for ${tableType} for queries ${failedQueries}. ")
         pw.close()
-        throw new TestException(s"Validation task failed for ${tableType}. See ${getCurrentDirectory}/${outputFile}")
+        throw new TestException(s"Validation task failed for ${tableType}. " +
+            s"See ${getCurrentDirectory}/${outputFile}")
       }
-      println(s"Validation for $tableType tables completed. See ${getCurrentDirectory}/${outputFile}")
+      println(s"Validation for $tableType tables completed. " +
+          s"See ${getCurrentDirectory}/${outputFile}")
       pw.println(s"Validation for $tableType tables completed.")
       pw.close()
     } match {
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
index 8c808288ab..0e5750df02 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
@@ -358,9 +358,9 @@ object NWTestUtil {
   }
 
   def executeAndValidateQueriesByChangingConstants(snc: SnappyContext, tableType: String,
-      pw: PrintWriter, sqlContext: SQLContext): Unit = {
+      pw: PrintWriter, sqlContext: SQLContext): String = {
     executeQueriesByChangingConstants = true
-    validateQueries(snc, tableType, pw, sqlContext)
+    return validateQueries(snc, tableType, pw, sqlContext)
   }
 
   def validateSelectiveQueriesFullResultSet(snc: SnappyContext, tableType: String, pw:
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsApp.scala
index 167bc1cfe0..64bccddd15 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsApp.scala
@@ -38,21 +38,36 @@ object ValidateNWQueriesWithChangingConstantsApp {
     NWQueries.dataFilesLocation = dataFilesLocation
     val tableType = args(1)
     val threadID = Thread.currentThread().getId
+    def getCurrentDirectory = new java.io.File(".").getCanonicalPath
     val outputFile = "ValidateNWQueriesApp_thread_" + threadID + "_" + System.currentTimeMillis +
         ".out"
     val pw = new PrintWriter(new FileOutputStream(new File(outputFile), true));
     // scalastyle:off println
-      pw.println(s"createAndLoadSparkTables Test started at : " + System.currentTimeMillis)
-      NWTestUtil.createAndLoadSparkTables(sqlContext)
-      println(s"createAndLoadSparkTables Test completed successfully at : " + System
-          .currentTimeMillis)
-      pw.println(s"createAndLoadSparkTables Test completed successfully at : " + System
-          .currentTimeMillis)
-      pw.println(s"ValidateQueries for ${tableType} tables Queries Test started at :" +
-          s" " + System.currentTimeMillis)
-        NWTestUtil.executeAndValidateQueriesByChangingConstants(snc, tableType, pw, sqlContext)
-      pw.println(s"validateQueries for ${tableType} tables Queries Test completed  " +
-          s"successfully at : " + System.currentTimeMillis)
+    var startTime = System.currentTimeMillis()
+    pw.println(s"createAndLoadSparkTables Test started at : " + startTime)
+    NWTestUtil.createAndLoadSparkTables(sqlContext)
+    var finishTime = System.currentTimeMillis()
+    var totalTime = (finishTime -startTime)/1000
+    pw.println(s"createAndLoadSparkTables completed successfully in :" + totalTime + " secs.")
+    pw.flush()
+    pw.println(s"ValidateQueriesFullResultSet for ${tableType} tables Queries Test started at" +
+        s" :  " + System.currentTimeMillis)
+    startTime = System.currentTimeMillis()
+    val failedQueries: String = NWTestUtil.executeAndValidateQueriesByChangingConstants(snc,
+      tableType, pw, sqlContext)
+    finishTime = System.currentTimeMillis()
+    totalTime = (finishTime - startTime)/1000
+    if (!failedQueries.isEmpty) {
+      println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. See " +
+          s"${getCurrentDirectory}/${outputFile}")
+      pw.println(s"Total execution took ${totalTime} seconds.")
+      pw.println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. ")
+      pw.close()
+      throw new Exception(s"Validation task failed for ${tableType} tables. See " +
+          s"${getCurrentDirectory}/${outputFile}")
+    }
+    pw.println(s"ValidateQueries for ${tableType} tables Test completed successfully in : " +
+        totalTime + " secs.")
     pw.close()
   }
 }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsJob.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsJob.scala
index f31194fb27..9e607e0e63 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsJob.scala
@@ -25,6 +25,7 @@ import io.snappydata.hydra.northwind
 
 import org.apache.spark.SparkContext
 import org.apache.spark.sql._
+import _root_.util.TestException
 
 class ValidateNWQueriesWithChangingConstantsJob extends SnappySQLJob {
   override def runSnappyJob(snappySession: SnappySession, jobConfig: Config): Any = {
@@ -44,17 +45,32 @@ class ValidateNWQueriesWithChangingConstantsJob extends SnappySQLJob {
       northwind.NWQueries.snc = snc
       NWQueries.dataFilesLocation = dataFilesLocation
       // scalastyle:off println
-      pw.println(s"createAndLoadSparkTables Test started at : " + System.currentTimeMillis)
+      var startTime = System.currentTimeMillis()
+      pw.println(s"createAndLoadSparkTables Test started at : " + startTime)
       NWTestUtil.createAndLoadSparkTables(sqlContext)
-      println(s"createAndLoadSparkTables Test completed successfully at : " + System
-          .currentTimeMillis)
-      pw.println(s"createAndLoadSparkTables Test completed successfully at : " + System
-          .currentTimeMillis)
+      var finishTime = System.currentTimeMillis()
+      var totalTime = (finishTime -startTime)/1000
+      pw.println(s"createAndLoadSparkTables completed successfully in :" + totalTime + " secs.")
+      pw.flush()
       pw.println(s"ValidateQueriesFullResultSet for ${tableType} tables Queries Test started at" +
           s" :  " + System.currentTimeMillis)
-      NWTestUtil.executeAndValidateQueriesByChangingConstants(snc, tableType, pw, sqlContext)
-      pw.println(s"validateQueries for ${tableType} tables Queries Test completed  " +
-          s"successfully at : " + System.currentTimeMillis)
+      startTime = System.currentTimeMillis()
+      val failedQueries: String = NWTestUtil.executeAndValidateQueriesByChangingConstants(snc,
+        tableType, pw, sqlContext)
+      finishTime = System.currentTimeMillis()
+      totalTime = (finishTime -startTime)/1000
+      if (!failedQueries.isEmpty) {
+        println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. " +
+            s"See ${getCurrentDirectory}/${outputFile}")
+        pw.println(s"Total execution took ${totalTime} seconds.")
+        pw.println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. ")
+        pw.close()
+        throw new TestException(s"Validation task failed for ${tableType}. " +
+            s"See ${getCurrentDirectory}/${outputFile}")
+      }
+      pw.println(s"ValidateQueries for ${tableType} tables Test completed  " +
+          s"successfully in : " + totalTime + " secs.")
+
       pw.close()
     } match {
       case Success(v) => pw.close()

From b5955fb1d038c5ccd2b208cad8557b17480e9edf Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Mon, 3 Sep 2018 14:19:28 +0530
Subject: [PATCH 13/33] - Fixing compilation issues.

---
 .../scala/io/snappydata/hydra/TestUtil.scala  | 304 ------------------
 .../hydra/cdcConnector/CDCValidationApp.scala |  14 +-
 2 files changed, 6 insertions(+), 312 deletions(-)
 delete mode 100644 dtests/src/test/scala/io/snappydata/hydra/TestUtil.scala

diff --git a/dtests/src/test/scala/io/snappydata/hydra/TestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/TestUtil.scala
deleted file mode 100644
index 7f65699f6b..0000000000
--- a/dtests/src/test/scala/io/snappydata/hydra/TestUtil.scala
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Copyright (c) 2017 SnappyData, Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you
- * may not use this file except in compliance with the License. You
- * may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License. See accompanying
- * LICENSE file.
- */
-
-package io.snappydata.hydra
-
-import java.io.{File, PrintWriter}
-
-import scala.io.Source
-
-import org.apache.spark.sql._
-import org.apache.spark.sql.catalyst.encoders.RowEncoder
-
-object TestUtil {
-  var validateFullResultSet: Boolean = false;
-  var tableType: String = null
-
-  /*
-  Executes the join query, matches the result with expected result, returns false if the query
-  validation has failed.
-  */
-  def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
-  PrintWriter, sqlContext: SQLContext, skipNumRowsValidation: Boolean): Boolean = {
-    var hasValidationFailed = false
-    snc.sql("set spark.sql.crossJoin.enabled = true")
-    if (!skipNumRowsValidation) {
-      val df = snc.sql(sqlString)
-      pw.println(s"No. rows in resultset for join query ${queryNum} is : ${df.count} for ${tableType} table")
-      if (df.count() != numRows) {
-        pw.println(s"Result mismatch for join query ${queryNum} : found ${df.count} rows but expected ${numRows} rows.")
-        hasValidationFailed = true
-      }
-      pw.flush()
-    }
-    if (validateFullResultSet)
-      hasValidationFailed = assertValidateFullResultSet(snc, sqlString, queryNum, pw, sqlContext,
-        hasValidationFailed)
-
-    if (hasValidationFailed)
-      pw.println(s"Failed Query =" + sqlString + " Table Type : " + tableType + "\n")
-    pw.flush()
-    return hasValidationFailed
-  }
-
-  /*
-   Executes the query, matches the result with expected result, returns false if the query
-   validation has failed.
-   */
-  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
-  PrintWriter, sqlContext: SQLContext, skipNumRowsValidation: Boolean): Boolean = {
-
-    var hasValidationFailed = false
-    if (!skipNumRowsValidation) {
-      val df = snc.sql(sqlString)
-      pw.println(s"No. rows in resultset for query ${queryNum} is : ${df.count} for ${tableType} table")
-      if (df.count() != numRows) {
-        pw.println(s"Result mismatch for query ${queryNum} : found ${df.count} rows but expected ${numRows} rows.")
-        hasValidationFailed = true
-      }
-      pw.flush()
-    }
-    if (validateFullResultSet)
-      hasValidationFailed = assertValidateFullResultSet(snc, sqlString, queryNum, pw, sqlContext,
-        hasValidationFailed)
-
-    if (hasValidationFailed)
-      pw.println(s"Failed Query : " + sqlString + "\n Table Type : " + tableType + "\n")
-    pw.flush()
-    return hasValidationFailed
-  }
-
-  /*
-  Performs full resultSet validation for snappy results for the query against snappy spark
-  resultset.
-   */
-  def assertValidateFullResultSet(snc: SnappyContext, sqlString: String, queryNum: String, pw:
-  PrintWriter, sqlContext: SQLContext, validationFailed: Boolean): Boolean = {
-    var hasValidationFailed = validationFailed
-
-    val snappyQueryFileName = s"Snappy_${queryNum}"
-    val snappyDest: String = SnappyTestUtils.getTempDir("snappyQueryFiles") + File.separator +
-        snappyQueryFileName
-    pw.println(snappyDest)
-    val snappyFile: File = new java.io.File(snappyDest)
-    var snappyDF = snc.sql(sqlString)
-
-    val sparkQueryFileName = s"Spark_${queryNum}"
-    val sparkDest: String = SnappyTestUtils.getTempDir("sparkQueryFiles") + File.separator + sparkQueryFileName
-    pw.println(sparkDest)
-    val sparkFile: File = new java.io.File(sparkDest)
-    var sparkDF = sqlContext.sql(sqlString)
-
-    try {
-      if (!snappyFile.exists()) {
-        val snap_col1 = snappyDF.schema.fieldNames(0)
-        val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
-        snappyDF = snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
-        SnappyTestUtils.writeToFile(snappyDF, snappyDest, snc)
-        //writeResultSetToCsv(snappyDF, snappyFile)
-        pw.println(s"${queryNum} Result Collected in file ${snappyDest}")
-      }
-      if (!sparkFile.exists()) {
-        val col1 = sparkDF.schema.fieldNames(0)
-        val col = sparkDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
-        sparkDF = sparkDF.repartition(1).sortWithinPartitions(col1, col: _*)
-        SnappyTestUtils.writeToFile(sparkDF, sparkDest, snc)
-        //writeResultSetToCsv(sparkDF, sparkFile)
-        pw.println(s"${queryNum} Result Collected in file ${sparkDest}")
-      }
-      hasValidationFailed = compareFiles(snappyFile, sparkFile, pw, hasValidationFailed)
-    } catch {
-      case ex: Exception => {
-        hasValidationFailed = true
-        pw.println(s"Full resultSet Validation failed for ${queryNum} with following exception:\n")
-        ex.printStackTrace(pw)
-      }
-    }
-    pw.flush()
-    return hasValidationFailed
-  }
-
-  /*
-  Writes the query resultset to a csv file.
-   */
-  def writeResultSetToCsv(df: DataFrame, destFile: File): Unit = {
-    val parent = destFile.getParentFile
-    if (!parent.exists()) {
-      parent.mkdirs()
-    }
-    implicit val encoder = RowEncoder(df.schema)
-    df.mapPartitions(rows => {
-      val sb: StringBuilder = new StringBuilder()
-      val pw = new PrintWriter(destFile)
-      try {
-        rows.foreach { row =>
-          row.toSeq.foreach {
-            case d: Double =>
-              // round to one decimal digit
-              sb.append(math.floor(d * 10.0 + 0.5) / 10.0).append(',')
-            case bd: java.math.BigDecimal =>
-              sb.append(bd.setScale(2, java.math.RoundingMode.HALF_UP)).append(',')
-            case v => sb.append(v).append(',')
-          }
-          val len = sb.length
-          if (len > 0) sb.setLength(len - 1)
-          sb.append('\n')
-          if (sb.length >= 1048576) {
-            pw.append(sb)
-            pw.flush()
-            sb.clear()
-          }
-        }
-        if (sb.nonEmpty) {
-          pw.append(sb)
-          pw.flush()
-        }
-      }
-      finally {
-        pw.close()
-      }
-      Iterator.empty
-    }).collect()
-  }
-
-  /*
-  Returns the path for the directory where the output of resultset of queries have been saved.
-  Creates a new directory, if not already existing
-   */
-  def getQueryResultDir(dirName: String): String = {
-    val log: File = new File(".")
-    var dest: String = null
-    val dirString = log.getCanonicalPath;
-    if (dirName.equals("sparkQueryFiles")) {
-      val logDir = log.listFiles.filter(_.getName.equals("snappyleader.log"))
-      if (!logDir.isEmpty) {
-        val leaderLogFile: File = logDir.iterator.next()
-        if (leaderLogFile.exists())
-          dest = dirString + File.separator + ".." + File.separator + ".." + File.separator + dirName
-      }
-      else dest = dirString + File.separator + ".." + File.separator + dirName
-    }
-    else dest = log.getCanonicalPath + File.separator + dirName
-    val queryResultDir: File = new File(dest)
-    if (!queryResultDir.exists)
-      queryResultDir.mkdir()
-    return queryResultDir.getAbsolutePath
-  }
-
-  /*
-  Performs full resultSet validation from snappy for a select query against results in a goldenFile.
-   */
-  def assertValidateFullResultSetFromGoldenFile(sqlString: String, queryNum: String, tableType:
-  String, snc: SnappyContext, pw: PrintWriter, validationFailed: Boolean, goldenFileDest: String):
-  Boolean = {
-    var hasValidationFailed = validationFailed
-
-    val snappyQueryFileName = s"Snappy_${queryNum}"
-    val snappyDest: String = SnappyTestUtils.getTempDir("snappyQueryFiles") + File.separator + snappyQueryFileName
-    pw.println(snappyDest)
-    val snappyFile: File = new java.io.File(snappyDest)
-    var snappyDF = snc.sql(sqlString)
-
-    pw.println(goldenFileDest)
-    val goldenFileName = goldenFileDest + File.separator + s"Spark_$queryNum"
-    val sortedGoldenDest = goldenFileDest + File.separator + s"Sorted_$queryNum"
-    val sortedGoldenFile: File = new java.io.File(sortedGoldenDest)
-    val goldenFile: File = new java.io.File(goldenFileName)
-
-    try {
-      if (!snappyFile.exists()) {
-        val snap_col1 = snappyDF.schema.fieldNames(0)
-        val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
-        snappyDF = snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
-        SnappyTestUtils.writeToFile(snappyDF, snappyDest, snc)
-        //writeResultSetToCsv(snappyDF, snappyFile)
-        pw.println(s"${queryNum} Result Collected in file $snappyDest")
-      }
-      if (!goldenFile.exists()) {
-        pw.println(s"Did not find any golden file for query $queryNum")
-        throw new Exception(s"Did not find any golden file for query $queryNum")
-      } else if (goldenFile.length() > 0) {
-        // sort the contents of golden file before comparing results
-        var goldenDF = snc.read.format("com.databricks.spark.csv")
-            .option("header", "false").option("inferSchema", "true").option("nullValue", "NULL")
-            .load(goldenFileName)
-        val col1 = goldenDF.schema.fieldNames(0)
-        val col = goldenDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
-        goldenDF = goldenDF.repartition(1).sortWithinPartitions(col1, col: _*)
-        SnappyTestUtils.writeToFile(goldenDF, sortedGoldenDest, snc)
-        //writeResultSetToCsv(goldenDF, sortedGoldenFile)
-        pw.println(s"${queryNum} Result Collected in file ${sortedGoldenDest}")
-      } else {
-        pw.println(s"zero results in query $queryNum.")
-      }
-      hasValidationFailed = compareFiles(snappyFile, sortedGoldenFile, pw, hasValidationFailed)
-
-    } catch {
-      case ex: Exception => {
-        hasValidationFailed = true
-        pw.println(s"Full resultSet Validation failed for ${queryNum} with following exception:\n")
-        ex.printStackTrace(pw)
-      }
-    }
-    pw.flush()
-    return hasValidationFailed
-  }
-
-  def compareFiles(snappyFile: File, sparkFile: File, pw: PrintWriter, validationFailed: Boolean):
-  Boolean = {
-    var hasValidationFailed = validationFailed
-    val expectedFile = sparkFile.listFiles.filter(_.getName.endsWith(".csv"))
-    val actualFile = snappyFile.listFiles.filter(_.getName.endsWith(".csv"))
-    val expectedLineSet = Source.fromFile(expectedFile.iterator.next()).getLines()
-    val actualLineSet = Source.fromFile(actualFile.iterator.next()).getLines()
-
-    while (expectedLineSet.hasNext && actualLineSet.hasNext) {
-      val expectedLine = expectedLineSet.next()
-      val actualLine = actualLineSet.next()
-      if (!actualLine.equals(expectedLine)) {
-        hasValidationFailed = true
-        pw.println(s"Expected Result : $expectedLine")
-        pw.println(s"Actual Result   : $actualLine")
-      }
-    }
-    if (actualLineSet.hasNext || expectedLineSet.hasNext) {
-      hasValidationFailed = true
-      if (actualLineSet.hasNext)
-        pw.println(s"Following ${actualLineSet.size} rows are unexpected in Snappy:")
-      while (actualLineSet.hasNext)
-        pw.println(actualLineSet.next())
-      if (expectedLineSet.hasNext)
-        pw.println(s"Following ${expectedLineSet.size} rows are missing in Snappy:")
-      while (expectedLineSet.hasNext)
-        pw.println(expectedLineSet.next())
-    }
-    hasValidationFailed
-  }
-
-  /*
-    If validation has failed for a query, add the query number to failedQueries String
-   */
-  def addToFailedQueryList(failedQueries: String, queryNum: String): String = {
-    var str = failedQueries
-    if (str.isEmpty)
-      str = queryNum
-    else
-      str = str + "," + queryNum
-    return str
-  }
-
-}
diff --git a/dtests/src/test/scala/io/snappydata/hydra/cdcConnector/CDCValidationApp.scala b/dtests/src/test/scala/io/snappydata/hydra/cdcConnector/CDCValidationApp.scala
index 4496c4b4ad..a77d5ef42a 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/cdcConnector/CDCValidationApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/cdcConnector/CDCValidationApp.scala
@@ -17,18 +17,16 @@
 package io.snappydata.hydra.cdcConnector
 
 
-import java.io.{File, PrintWriter}
-import java.sql.{Connection, DriverManager, ResultSet}
+import java.io.PrintWriter
+import java.sql.{Connection, DriverManager}
 import java.util.Properties
 
+import scala.util.{Failure, Random, Success, Try}
+
 import breeze.numerics.abs
-import io.snappydata.hydra.SnappyTestUtils.{getTempDir, writeToFile}
-import org.apache.spark.sql.{SQLContext, SnappyContext}
-import org.apache.spark.{SparkConf, SparkContext}
 
-import scala.collection.mutable
-import scala.io.Source
-import scala.util.{Failure, Random, Success, Try}
+import org.apache.spark.sql.SnappyContext
+import org.apache.spark.{SparkConf, SparkContext}
 
 
 object CDCValidationApp {

From e9d04df7f36d3738d28234dc384c039a5f4e930a Mon Sep 17 00:00:00 2001
From: Sonal Agarwal <sagarwal@dev1.telx.snappydata.io>
Date: Tue, 11 Sep 2018 23:55:56 -0700
Subject: [PATCH 14/33] - Adding changes for stack dump at the end of the test.
 - Add method to compare files (results) without sorting the dataframe.

---
 .../org/apache/spark/sql/NWQueries.scala      |  45 +----
 .../adAnalytics/SnappyAdAnalyticsTest.java    |   8 +
 .../hydra/adAnalytics/adAnalytics.conf        |  20 +-
 .../adAnalyticsRestartStreaming.conf          |   5 +
 .../snappydata/hydra/cluster/SnappyPrms.java  |  19 ++
 .../snappydata/hydra/cluster/SnappyTest.java  | 127 +++++++++++-
 .../hydra/cluster/stopDualModeCluster.conf    |   4 +-
 .../cluster/stopEmbeddedModeCluster.conf      |   4 +-
 .../hydra/cluster/stopRowStoreCluster.conf    |  11 +-
 .../hydra/cluster/stopSnappyCluster.conf      |  11 +-
 .../hydra/northwind/stopDualModeCluster.conf  |   8 +-
 .../hydra/security/stopSnappyCluster.conf     |   4 +-
 .../hydra/storeTests/stopSnappyCluster.inc    |   3 +
 .../io/snappydata/hydra/SnappyTestUtils.scala | 182 ++++++++++++++++--
 .../hydra/northwind/NWQueries.scala           |  43 -----
 .../hydra/northwind/NWTestUtil.scala          | 133 +++++++------
 ...ateNWQueriesWithChangingConstantsApp.scala |   9 +-
 ...ateNWQueriesWithChangingConstantsJob.scala |   5 +-
 18 files changed, 438 insertions(+), 203 deletions(-)

diff --git a/cluster/src/test/scala/org/apache/spark/sql/NWQueries.scala b/cluster/src/test/scala/org/apache/spark/sql/NWQueries.scala
index 901df388bb..f8c31ff8f0 100644
--- a/cluster/src/test/scala/org/apache/spark/sql/NWQueries.scala
+++ b/cluster/src/test/scala/org/apache/spark/sql/NWQueries.scala
@@ -787,80 +787,37 @@ object NWQueries extends SnappyFunSuite {
     "Q23" -> Q23,
     "Q24" -> Q24,
     "Q25" -> Q25,
-    "Q25_1" -> Q25_1,
-    "Q25_2" -> Q25_2,
     "Q26" -> Q26,
-    "Q26_1" -> Q26_1,
-    "Q26_2" -> Q26_2,
     "Q27" -> Q27,
-    "Q27_1" -> Q27_1,
-    "Q27_2" -> Q27_2,
-    "Q27_3" -> Q27_3,
-    "Q27_4" -> Q27_4,
     "Q28" -> Q28,
-    "Q28_1" -> Q28_1,
-    "Q28_2" -> Q28_2,
     "Q29" -> Q29,
-    "Q29_1" -> Q29_1,
-    "Q29_2" -> Q29_2,
     "Q30" -> Q30,
-    "Q30_1" -> Q30_1,
-    "Q30_2" -> Q30_2,
     "Q31" -> Q31,
-    "Q31_1" -> Q31_1,
-    "Q31_2" -> Q31_2,
-    "Q31_3" -> Q31_3,
-    "Q31_4" -> Q31_4,
     "Q32" -> Q32,
-    "Q32_1" -> Q32_1,
     "Q33" -> Q33,
-    "Q33_1" -> Q33_1,
     "Q34" -> Q34,
-    "Q34_1" -> Q34_1,
-    "Q34_2" -> Q34_2,
     "Q35" -> Q35,
-    "Q35_1" -> Q35_1,
-    "Q35_2" -> Q35_2,
     "Q36" -> Q36,
-    "Q36_1" -> Q36_1,
-    "Q36_2" -> Q36_2,
     "Q37" -> Q37,
     "Q38" -> Q38,
-    "Q38_1" -> Q38_1,
-    "Q38_2" -> Q38_2,
     "Q39" -> Q39,
     "Q40" -> Q40,
-    "Q40_1" -> Q40_1,
-    "Q40_2" -> Q40_2,
     "Q41" -> Q41,
     "Q42" -> Q42,
-    "Q42_1" -> Q42_1,
-    "Q42_2" -> Q42_2,
     "Q43" -> Q43,
-    "Q43_1" -> Q43_1,
-    "Q43_2" -> Q43_2,
     "Q44" -> Q44,
     "Q45" -> Q45,
     "Q46" -> Q46,
     "Q47" -> Q47,
     "Q48" -> Q48,
     "Q49" -> Q49,
-    "Q49_1" -> Q49_1,
-    "Q49_2" -> Q49_2,
     "Q50" -> Q50,
     "Q51" -> Q51,
-    "Q51_1" -> Q51_1,
-    "Q51_2" -> Q51_2,
     "Q52" -> Q52,
     "Q53" -> Q53,
     "Q54" -> Q54,
     "Q55" -> Q55,
-    "Q55_1" -> Q55_1,
-    "Q55_2" -> Q55_2,
-    "Q56" -> Q56,
-    "Q56_1" -> Q56_1,
-    "Q56_2" -> Q56_2,
-    "Q56_3" -> Q56_3
+    "Q56" -> Q56
   )
 
   def regions(sqlContext: SQLContext): DataFrame =
diff --git a/dtests/src/test/java/io/snappydata/hydra/adAnalytics/SnappyAdAnalyticsTest.java b/dtests/src/test/java/io/snappydata/hydra/adAnalytics/SnappyAdAnalyticsTest.java
index 11124b923b..382009a88a 100644
--- a/dtests/src/test/java/io/snappydata/hydra/adAnalytics/SnappyAdAnalyticsTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/adAnalytics/SnappyAdAnalyticsTest.java
@@ -313,6 +313,10 @@ protected void generateAndPublish(Vector generatorAndPublisher) {
    * Stop kafka brokers.
    */
   public static synchronized void HydraTask_StopKafkaBrokers() {
+    if(snappyAdAnalyticsTest == null) {
+      snappyAdAnalyticsTest = new SnappyAdAnalyticsTest();
+      kafkaLogDir = getCurrentDirPath() + sep + "kafka_logs";
+    }
     snappyAdAnalyticsTest.stopKafkaBroker();
   }
 
@@ -334,6 +338,10 @@ protected void stopKafkaBroker() {
    */
 
   public static synchronized void HydraTask_StopKafkaZookeeper() {
+    if(snappyAdAnalyticsTest == null) {
+      snappyAdAnalyticsTest = new SnappyAdAnalyticsTest();
+      kafkaLogDir = getCurrentDirPath() + sep + "kafka_logs";
+    }
     snappyAdAnalyticsTest.stopKafkaZookeeper();
   }
 
diff --git a/dtests/src/test/java/io/snappydata/hydra/adAnalytics/adAnalytics.conf b/dtests/src/test/java/io/snappydata/hydra/adAnalytics/adAnalytics.conf
index 2505f35160..491de18e6c 100644
--- a/dtests/src/test/java/io/snappydata/hydra/adAnalytics/adAnalytics.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/adAnalytics/adAnalytics.conf
@@ -118,20 +118,20 @@ CLOSETASK  taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = Hy
     io.snappydata.hydra.cluster.SnappyPrms-sqlScriptNames = adAnalytics_queries.sql
     threadGroups = snappyThreads;
 
-CLOSETASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappy
-    threadGroups = snappyThreads;
+ENDTASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_dumpStacks
+    clientNames = locator1;
 
-CLOSETASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappyCluster
-    threadGroups = snappyThreads;
+ENDTASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappyCluster
+    clientNames = locator1;
 
-CLOSETASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_deleteSnappyConfig
-    threadGroups = snappyThreads;
+ENDTASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_deleteSnappyConfig
+    clientNames = locator1;
 
-CLOSETASK     taskClass   = io.snappydata.hydra.adAnalytics.SnappyAdAnalyticsTest taskMethod  = HydraTask_StopKafkaBrokers
-    threadGroups = snappyThreads;
+ENDTASK     taskClass   = io.snappydata.hydra.adAnalytics.SnappyAdAnalyticsTest taskMethod  = HydraTask_StopKafkaBrokers
+    clientNames = locator1;
 
-CLOSETASK     taskClass   = io.snappydata.hydra.adAnalytics.SnappyAdAnalyticsTest taskMethod  = HydraTask_StopKafkaZookeeper
-    threadGroups = snappyThreads;
+ENDTASK     taskClass   = io.snappydata.hydra.adAnalytics.SnappyAdAnalyticsTest taskMethod  = HydraTask_StopKafkaZookeeper
+    clientNames = locator1;
 
 ENDTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_cleanUpSnappyProcessesOnFailure
     clientNames = locator1;
diff --git a/dtests/src/test/java/io/snappydata/hydra/adAnalytics/adAnalyticsRestartStreaming.conf b/dtests/src/test/java/io/snappydata/hydra/adAnalytics/adAnalyticsRestartStreaming.conf
index 1ac2fea5f0..15e174de92 100644
--- a/dtests/src/test/java/io/snappydata/hydra/adAnalytics/adAnalyticsRestartStreaming.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/adAnalytics/adAnalyticsRestartStreaming.conf
@@ -25,4 +25,9 @@ CLOSETASK  taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = Hy
        io.snappydata.hydra.cluster.SnappyPrms-sqlScriptNames = adAnalytics_queries.sql
        threadGroups = snappyThreads;
 
+CLOSETASK  taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSQLScripts
+       io.snappydata.hydra.cluster.SnappyPrms-logFileName = sqlScriptResults1.log
+       io.snappydata.hydra.cluster.SnappyPrms-sqlScriptNames = adAnalytics_queries.sql
+       threadGroups = snappyThreads;
+
 INCLUDE $JTESTS/io/snappydata/hydra/adAnalytics/adAnalytics.conf;
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
index 027d3e896c..1239cb46cc 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
@@ -516,6 +516,25 @@ public class SnappyPrms extends BasePrms {
    */
   public static Long numRowsList;
 
+  /**
+   * Number of stack-dump iterations to take for each thread of the locators, servers and leads
+   */
+  public static Long numOfStackDumpItr;
+
+  public static int getNumOfStackDumpItrs(){
+    Long key = numOfStackDumpItr;
+    return tasktab().intAt(key, tab().intAt(key, 3));
+  }
+
+  /**
+   * Sleep time in secs between 2 thread dumps.
+   */
+  public static Long sleepBtwnStackDump;
+
+  public static int getSleepBtwnStackDumps(){
+    Long key = sleepBtwnStackDump;
+    return tasktab().intAt(key, tab().intAt(key, 5));
+  }
 
   public static String getCredentialFile() {
     Long key = credentialFile;
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index 8dc339a4eb..4064d22cf8 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -61,6 +61,8 @@ public class SnappyTest implements Serializable {
   protected static SnappyTest snappyTest;
   protected static HostDescription hd = TestConfig.getInstance().getMasterDescription()
       .getVmDescription().getHostDescription();
+  protected static HostDescription localhd = TestConfig.getInstance().getClientDescription
+      (RemoteTestModule.getMyClientName()).getVmDescription().getHostDescription();
   protected static char sep = hd.getFileSep();
   private static String gemfireHome = hd.getGemFireHome() + sep;
   protected static String productDir = gemfireHome + ".." + sep + "snappy" + sep;
@@ -1727,6 +1729,60 @@ public int executeProcess(ProcessBuilder pb, File logFile) {
     }
   }
 
+  protected  synchronized void recordSnappyProcessIDinNukeRun(String hostName, String pName){
+    String cmd = "ssh -n -x -o PasswordAuthentication=no -o StrictHostKeyChecking=no " + hostName;
+    Process pr = null;
+    try {
+      String dest = getCurrentDirPath() + File.separator + "PIDs_" + pName + "_" +
+          hostName + ".log";
+      File logFile = new File(dest);
+      cmd += " jps | grep " + pName + " | awk '{print $1}'";
+      hd = TestConfig.getInstance().getMasterDescription()
+          .getVmDescription().getHostDescription();
+      ProcessBuilder pb = new ProcessBuilder("/bin/bash", "-c", cmd);
+      pb.redirectOutput(ProcessBuilder.Redirect.appendTo(logFile));
+      pr = pb.start();
+      pr.waitFor();
+      FileInputStream fis = new FileInputStream(logFile);
+      BufferedReader br = new BufferedReader(new InputStreamReader(fis));
+      String str = null;
+      while ((str = br.readLine()) != null) {
+        int pid = Integer.parseInt(str);
+        try {
+          if (SnappyBB.getBB().getSharedMap().containsKey("pid_"+ pName + "_" + pid)) {
+            Log.getLogWriter().info("Pid " + pid + " is already recorded with Master");
+          } else {
+            Log.getLogWriter().info("Recording PID " + pid + " with Master.");
+            RemoteTestModule.Master.recordPID(hd, pid);
+            SnappyBB.getBB().getSharedMap().put("pid" + "_" + pName + "_" + str, str);
+            SnappyBB.getBB().getSharedMap().put("host" + "_" + pid + "_" + hostName, hostName);
+          }
+        } catch (RemoteException e) {
+          String s = "Unable to access master to record PID: " + pid;
+          throw new HydraRuntimeException(s, e);
+        }
+      }
+      br.close();
+    } catch (IOException e) {
+      String s = "Problem while starting the process : " + pr;
+      throw new TestException(s, e);
+    } catch (InterruptedException e) {
+      String s = "Exception occurred while waiting for the process execution : " + pr;
+      throw new TestException(s, e);
+    }
+  }
+
+  public static String getCurrentDirPath(){
+    String currentDir;
+    try {
+      currentDir = new File(".").getCanonicalPath();
+    } catch (IOException e) {
+      String s = "Problem while accessing the current dir.";
+      throw new TestException(s, e);
+    }
+    return currentDir;
+  }
+
   protected synchronized void recordSnappyProcessIDinNukeRun(String pName) {
     Process pr = null;
     try {
@@ -2174,7 +2230,7 @@ public void executeSparkJob(Vector jobClassNames, String logFileName) {
         pb = new ProcessBuilder("/bin/bash", "-c", command);
         int status = snappyTest.executeProcess(pb, logFile);
         if(status==1 && !cycleVms){
-          throw new TestException("Snappy job execution has failed. Please check the logs.");
+          throw new TestException("Spark Application has failed. Please check the logs.");
         }
         Log.getLogWriter().info("CDC stream is : " + SnappyCDCPrms.getIsCDCStream());
         if (SnappyCDCPrms.getIsCDCStream()) {
@@ -2580,6 +2636,28 @@ public static synchronized void HydraTask_createAndStartSnappyLocator() {
     }
   }
 
+  /*Dump stacks for threads in members of snappy cluster*/
+  public static synchronized void HydraTask_dumpStacks() {
+    initSnappyArtifacts();
+    int dumpItr = SnappyPrms.getNumOfStackDumpItrs();
+    for (int i = 0; i < dumpItr; i++) {
+      snappyTest.dumpStacks();
+      if (i < (dumpItr-1)) {
+        sleepForMs(SnappyPrms.getSleepBtwnStackDumps());
+      }
+    }
+  }
+
+  public void dumpStacks() {
+    Set pids = getPidList();
+    Iterator itr = pids.iterator();
+    while(itr.hasNext()) {
+      String val = (String)itr.next();
+      int pid = Integer.parseInt(val);
+      String host = getPidHost(val);
+      ProcessMgr.printProcessStacks(host,pid);
+    }
+  }
 
   /**
    * Create and start snappy server.
@@ -3342,8 +3420,36 @@ protected static String getMasterHost() {
     return masterHost;
   }
 
+  private String printStackTrace(Exception e){
+    StringWriter error = new StringWriter();
+    e.printStackTrace(new PrintWriter(error));
+    return error.toString();
+  }
+
+  public List<String> getHostNameFromConf(String nodeName){
+    List<String>  hostNames = new ArrayList<>();
+    String confFile = snappyTest.getScriptLocation(productConfDirPath + File.separator + nodeName);
+    try {
+      File file = new File(confFile);
+      FileReader fileReader = new FileReader(file);
+      BufferedReader bufferedReader = new BufferedReader(fileReader);
+
+      String line;
+      while ((line = bufferedReader.readLine()) != null) {
+        String[] data = line.trim().split(" ");
+        if(!hostNames.contains(data[0]))
+          hostNames.add(data[0]);
+      }
+      fileReader.close();
+    } catch (IOException e) {
+      Log.getLogWriter().info(printStackTrace(e));
+    }
+    return hostNames;
+  }
+
   protected void startSnappyLocator() {
     File log = null;
+    List<String> hostNames = getHostNameFromConf("locators");
     ProcessBuilder pb = null;
     try {
       if (useRowStore) {
@@ -3356,6 +3462,9 @@ protected void startSnappyLocator() {
       String dest = log.getCanonicalPath() + File.separator + "snappyLocatorSystem.log";
       File logFile = new File(dest);
       snappyTest.executeProcess(pb, logFile);
+      sleepForMs(30);
+      for(int i = 0; i< hostNames.size(); i++)
+        recordSnappyProcessIDinNukeRun(hostNames.get(i), "LocatorLauncher");
     } catch (IOException e) {
       String s = "problem occurred while retriving logFile path " + log;
       throw new TestException(s, e);
@@ -3364,6 +3473,7 @@ protected void startSnappyLocator() {
 
   protected void startSnappyServer() {
     File log = null;
+    List<String> hostNames = getHostNameFromConf("servers");
     ProcessBuilder pb = null;
     try {
       if (useRowStore) {
@@ -3382,14 +3492,26 @@ protected void startSnappyServer() {
         /*Thread.sleep(60000);
         startSnappyServer();*/
       }
+      sleepForMs(30);
+      for(int i = 0; i< hostNames.size(); i++)
+        recordSnappyProcessIDinNukeRun(hostNames.get(i), "ServerLauncher");
     } catch (IOException e) {
       String s = "problem occurred while retriving logFile path " + log;
       throw new TestException(s, e);
     }
   }
 
+  public static void sleepForMs(int sleepTimeInSec){
+    try {
+      Thread.sleep(sleepTimeInSec * 1000);
+    } catch (InterruptedException ie) {
+      throw new TestException("Got exception while thread was sleeping..", ie);
+    }
+  }
+
   protected void startSnappyLead() {
     File log = null;
+    List<String> hostNames = getHostNameFromConf("leads");
     try {
       ProcessBuilder pb = new ProcessBuilder(snappyTest.getScriptLocation("snappy-leads.sh"),
           "start");
@@ -3397,6 +3519,9 @@ protected void startSnappyLead() {
       String dest = log.getCanonicalPath() + File.separator + "snappyLeaderSystem.log";
       File logFile = new File(dest);
       snappyTest.executeProcess(pb, logFile);
+      sleepForMs(30);
+      for(int i = 0; i< hostNames.size(); i++)
+        recordSnappyProcessIDinNukeRun(hostNames.get(i), "LeaderLauncher");
     } catch (IOException e) {
       String s = "problem occurred while retriving logFile path " + log;
       throw new TestException(s, e);
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/stopDualModeCluster.conf b/dtests/src/test/java/io/snappydata/hydra/cluster/stopDualModeCluster.conf
index 957dd8247c..d2c316b171 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/stopDualModeCluster.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/stopDualModeCluster.conf
@@ -18,8 +18,8 @@ INITTASK    taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = H
             runMode = always
             threadGroups = snappyThreads;
 
-CLOSETASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappy
-            threadGroups = snappyThreads;
+ENDTASK     taskClass = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_dumpStacks
+            clientNames = snappy1;
 
 ENDTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSparkCluster
             clientNames = snappy1;
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/stopEmbeddedModeCluster.conf b/dtests/src/test/java/io/snappydata/hydra/cluster/stopEmbeddedModeCluster.conf
index 9463018e9c..3cac782537 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/stopEmbeddedModeCluster.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/stopEmbeddedModeCluster.conf
@@ -1,8 +1,8 @@
 hydra.Prms-testRequirement = "Test to stop the snappy embedded mode cluster started using startEmbeddedModeCluster.conf";
 hydra.Prms-testDescription = "This test stops the snappy embedded mode cluster started using startEmbeddedModeCluster.conf.";
 
-CLOSETASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappy
-            threadGroups = snappyThreads;
+ENDTASK     taskClass = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_dumpStacks
+            clientNames = locator1;
 
 ENDTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappyCluster
             clientNames = locator1;
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/stopRowStoreCluster.conf b/dtests/src/test/java/io/snappydata/hydra/cluster/stopRowStoreCluster.conf
index ba8f0e36a5..dd5e927ea9 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/stopRowStoreCluster.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/stopRowStoreCluster.conf
@@ -11,11 +11,14 @@ INITTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  =
   runMode = always
   threadGroups = snappyThreads;
 
-CLOSETASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappyServers
-  threadGroups = snappyThreads;
+ENDTASK     taskClass = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_dumpStacks
+            clientNames = locator1;
 
-CLOSETASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappyLocator
-  threadGroups = snappyThreads;
+ENDTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappyServers
+  clientNames = locator1;
+
+ENDTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappyLocator
+  clientNames = locator1;
 
 io.snappydata.hydra.cluster.SnappyPrms-isStopMode = true;
 //io.snappydata.hydra.cluster.SnappyPrms-useRowStore = true;
\ No newline at end of file
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/stopSnappyCluster.conf b/dtests/src/test/java/io/snappydata/hydra/cluster/stopSnappyCluster.conf
index 212106221a..3649999f7f 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/stopSnappyCluster.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/stopSnappyCluster.conf
@@ -23,11 +23,14 @@ CLOSETASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  =
 CLOSETASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappyLocator
   threadGroups = snappyThreads;*/
 
-CLOSETASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappyCluster
-            threadGroups = snappyThreads;
+ENDTASK     taskClass = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_dumpStacks
+            clientNames = locator1;
+
+ENDTASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappyCluster
+            clientNames = locator1;
 
-CLOSETASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_deleteSnappyConfig
-            threadGroups = snappyThreads;
+ENDTASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_deleteSnappyConfig
+            clientNames = locator1;
 
 ENDTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_cleanUpSnappyProcessesOnFailure
             clientNames = locator1;
diff --git a/dtests/src/test/java/io/snappydata/hydra/northwind/stopDualModeCluster.conf b/dtests/src/test/java/io/snappydata/hydra/northwind/stopDualModeCluster.conf
index 3480bbdec8..fc130d4fae 100644
--- a/dtests/src/test/java/io/snappydata/hydra/northwind/stopDualModeCluster.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/northwind/stopDualModeCluster.conf
@@ -1,11 +1,11 @@
 hydra.Prms-testRequirement = "Test to stop the snappy embedded and split mode cluster started using startDualModeCluster.conf";
 hydra.Prms-testDescription = "This test stops the snappy embedded as well as split mode cluster started using startDualModeCluster.conf.";
 
-CLOSETASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappy
-            threadGroups = snappyThreads;
+ENDTASK     taskClass = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_dumpStacks
+            clientNames = locator1;
 
-CLOSETASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSparkCluster
-            threadGroups = snappyThreads;
+ENDTASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSparkCluster
+            clientNames = locator1;
 
 ENDTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappyCluster
             clientNames = locator1;
diff --git a/dtests/src/test/java/io/snappydata/hydra/security/stopSnappyCluster.conf b/dtests/src/test/java/io/snappydata/hydra/security/stopSnappyCluster.conf
index 5e8ef7c627..c8e4761112 100644
--- a/dtests/src/test/java/io/snappydata/hydra/security/stopSnappyCluster.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/security/stopSnappyCluster.conf
@@ -1,8 +1,8 @@
 hydra.Prms-testRequirement = "Test to stop the snappy embedded and split mode cluster started using startDualModeCluster.conf";
 hydra.Prms-testDescription = "This test stops the snappy embedded as well as split mode cluster started using startDualModeCluster.conf.";
 
-CLOSETASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappy
-            threadGroups = snappyThreads;
+ENDTASK     taskClass = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_dumpStacks
+            clientNames = locator1;
 
 ENDTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_stopSnappyCluster
             clientNames = locator1;
diff --git a/dtests/src/test/java/io/snappydata/hydra/storeTests/stopSnappyCluster.inc b/dtests/src/test/java/io/snappydata/hydra/storeTests/stopSnappyCluster.inc
index 9e26104166..57ddbc9b76 100644
--- a/dtests/src/test/java/io/snappydata/hydra/storeTests/stopSnappyCluster.inc
+++ b/dtests/src/test/java/io/snappydata/hydra/storeTests/stopSnappyCluster.inc
@@ -1,3 +1,6 @@
+ENDTASK     taskClass = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_dumpStacks
+            clientNames = locator1;
+
 ENDTASK     taskClass   = sql.snappy.SnappyTest taskMethod  = HydraTask_stopSnappyCluster
             clientNames = locator1;
 
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index 8e17ae35bb..abcd841a96 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -17,12 +17,12 @@
 
 package io.snappydata.hydra
 
-import java.io.{File, PrintWriter}
+import java.io.{BufferedReader, File, FileNotFoundException, FileReader, IOException, PrintWriter}
 
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.encoders.RowEncoder
 
-import scala.io.Source
+import java.util
 
 object SnappyTestUtils {
 
@@ -141,22 +141,23 @@ object SnappyTestUtils {
 
       try {
         if (!snappyFile.exists()) {
-          val snap_col1 = snappyDF.schema.fieldNames(0)
-          val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
-          snappyDF = snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
+          // val snap_col1 = snappyDF.schema.fieldNames(0)
+          // val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
+          snappyDF = snappyDF.repartition(1) // .sortWithinPartitions(snap_col1, snap_col: _*)
           writeToFile(snappyDF, snappyDest, snc)
           // writeResultSetToCsv(snappyDF, snappyFile)
           pw.println(s"${queryNum} Result Collected in file ${snappyDest}")
         }
         if (!sparkFile.exists()) {
-          val col1 = sparkDF.schema.fieldNames(0)
-          val col = sparkDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
-          sparkDF = sparkDF.repartition(1).sortWithinPartitions(col1, col: _*)
+          // val col1 = sparkDF.schema.fieldNames(0)
+          // val col = sparkDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
+          sparkDF = sparkDF.repartition(1) // .sortWithinPartitions(col1, col: _*)
           writeToFile(sparkDF, sparkDest, snc)
           // writeResultSetToCsv(sparkDF, sparkFile)
           pw.println(s"${queryNum} Result Collected in file ${sparkDest}")
         }
-        fullRSValidationFailed = compareFiles(snappyFile, sparkFile, pw, fullRSValidationFailed)
+        fullRSValidationFailed = compareFiles(snappyFile, sparkFile, pw, queryNum,
+           fullRSValidationFailed)
       } catch {
         case ex: Exception => {
           fullRSValidationFailed = true
@@ -246,15 +247,75 @@ object SnappyTestUtils {
    */
   def isIgnorable(actualLine: String, expectedLine: String): Boolean = {
     var canBeIgnored = false
+    if ((actualLine != null && actualLine.size > 0) && (expectedLine != null && expectedLine.size
+     > 0)) {
+      val actualArray = actualLine.split(",")
+      val expectedArray = expectedLine.split(",")
+      var diff: Double = 0.0
+      if(actualArray.length != expectedArray.length){
+        canBeIgnored = false
+      } else {
+        canBeIgnored = true
+        for (i <- 0 until actualArray.length) {
+          val value1 = actualArray(i).toDouble
+          val value2 = expectedArray(i).toDouble
+          diff = math.abs(value1 - value2)
+          // ignorable only when every column pair is within the tolerance
+          if (diff > 0.01) canBeIgnored = false
+        }
+      }
+    }
     return canBeIgnored
   }
 
-  def compareFiles(snappyFile: File, sparkFile: File, pw: PrintWriter, validationFailed: Boolean):
-  Boolean = {
+  def executeProcess(pb: ProcessBuilder, logFile: File, pw: PrintWriter): Int = {
+    var p: Process = null
+    try {
+      if (logFile != null) {
+        pb.redirectErrorStream(true)
+        pb.redirectError(ProcessBuilder.Redirect.PIPE)
+        pb.redirectOutput(ProcessBuilder.Redirect.appendTo(logFile))
+      }
+      p = pb.start
+      if (logFile != null) {
+        assert(pb.redirectInput eq ProcessBuilder.Redirect.PIPE)
+        assert(pb.redirectOutput.file eq logFile)
+        assert(p.getInputStream.read == -1)
+      }
+      val rc = p.waitFor
+      if ((rc == 0) || (pb.command.contains("grep") && rc == 1)) {
+        pw.println("Process executed successfully")
+        0
+      }
+      else {
+        pw.println("Process execution failed with exit code: " + rc)
+        1
+      }
+    } catch {
+      case e: IOException =>
+        pw.println("Exception occurred while starting the process:" + pb + "\nError Message:" + e
+            .getMessage)
+        1
+      case e: InterruptedException =>
+        pw.println("Exception occurred while waiting for the process execution:" + p + "\nError " +
+            "Message:" + e.getMessage)
+        1
+    }
+  }
+
+  def compareFiles(snappyFile: File, sparkFile: File, pw: PrintWriter, queryNum:
+  String, validationFailed: Boolean)
+  : Boolean = {
     var hasValidationFailed = validationFailed
 
     val expectedFile = sparkFile.listFiles.filter(_.getName.endsWith(".csv"))
     val actualFile = snappyFile.listFiles.filter(_.getName.endsWith(".csv"))
+
+    hasValidationFailed = compareFiles(getQueryResultDir("snappyQueryFiles"), actualFile.iterator
+        .next().getAbsolutePath, expectedFile.iterator.next().getAbsolutePath,
+      pw, queryNum, hasValidationFailed)
+
+    /*
     val expectedLineSet = Source.fromFile(expectedFile.iterator.next()).getLines()
     val actualLineSet = Source.fromFile(actualFile.iterator.next()).getLines()
 
@@ -262,10 +323,14 @@ object SnappyTestUtils {
       val expectedLine = expectedLineSet.next()
       val actualLine = actualLineSet.next()
       if (!actualLine.equals(expectedLine)) {
-        isIgnorable(actualLine, expectedLine)
-        hasValidationFailed = true
-        pw.println(s"Expected Result : $expectedLine")
-        pw.println(s"Actual Result   : $actualLine")
+        if (!isIgnorable(actualLine, expectedLine)) {
+          hasValidationFailed = true
+          pw.println(s"Expected Result : $expectedLine")
+          pw.println(s"Actual Result   : $actualLine")
+        } else {
+          hasValidationFailed = false
+        }
+
       }
     }
     // scalastyle:off println
@@ -284,6 +349,91 @@ object SnappyTestUtils {
         }
       }
     }
+    */
+    hasValidationFailed
+  }
+
+  def compareFiles(dir: String, snappyResultsFile: String, sparkResultsFile: String,
+      pw: PrintWriter, queryNum: String, hasValidationFailed: Boolean): Boolean = {
+    val aStr = new StringBuilder
+    var pb: ProcessBuilder = null
+    var command: String = null
+    val missingFileName = dir + File.separator + "missing_" + queryNum + ".txt"
+    val unexpectedFileName = dir + File.separator + "unexpected_" + queryNum + ".txt"
+    try {
+      var writer = new PrintWriter(missingFileName)
+      writer.print("")
+      writer.close()
+      writer = new PrintWriter(unexpectedFileName)
+      writer.print("")
+      writer.close()
+    } catch {
+      case fe: FileNotFoundException =>
+        pw.println("Log exception while overwriting the result mismatch files: " + fe.getMessage)
+        false
+    }
+    val unexpectedResultsFile = new File(unexpectedFileName)
+    val missingResultsFile = new File(missingFileName)
+    command = "grep -v -F -x -f " + sparkResultsFile + " " + snappyResultsFile
+    pb = new ProcessBuilder("/bin/bash", "-c", command)
+    pw.println("Executing command : " + command)
+    // get the unexpected rows in snappy
+    executeProcess(pb, unexpectedResultsFile, pw)
+    command = "grep -v -F -x -f " + snappyResultsFile + " " + sparkResultsFile
+    pb = new ProcessBuilder("/bin/bash", "-c", command)
+    pw.println("Executing command : " + command)
+    // get the missing rows in snappy
+    executeProcess(pb, missingResultsFile, pw)
+    var unexpectedRsReader: BufferedReader = null
+    var missingRsReader: BufferedReader = null
+    try {
+      unexpectedRsReader = new BufferedReader(new FileReader(unexpectedResultsFile))
+      missingRsReader = new BufferedReader(new FileReader(missingResultsFile))
+    } catch {
+      case fe: FileNotFoundException =>
+        pw.println("Could not find file to compare results.", fe)
+        false
+    }
+    var line: String = null
+    val unexpected = new util.ArrayList[String]
+    val missing = new util.ArrayList[String]
+    try {
+      while ( {
+        (line = unexpectedRsReader.readLine) != null
+      }) unexpected.add("\n  " + line)
+      while ( {
+        (line = missingRsReader.readLine) != null
+      }) missing.add("\n  " + line)
+      unexpectedRsReader.close()
+      missingRsReader.close()
+    } catch {
+      case ie: IOException =>
+        pw.println("Got exception while reading resultset files", ie)
+    }
+    if (missing.size > 0) {
+      if (missing.size < 20) {
+        aStr.append("\nThe following " + missing.size + " rows are missing from snappy resultset:")
+        aStr.append(missing.toString)
+      }
+      else {
+        aStr.append("There are " + missing.size + " rows missing in snappy for " + queryNum + "." +
+            " " + "Please check " + missingFileName)
+      }
+      aStr.append("\n")
+    }
+    if (unexpected.size > 0) {
+      if (unexpected.size < 20) {
+        aStr.append("\nThe following " + unexpected.size +
+            " rows from snappy resultset are unexpected: ")
+        aStr.append(unexpected.toString)
+      }
+      else {
+        aStr.append("There are " + unexpected.size + " rows unexpected in snappy for " + queryNum +
+            ". Please check " + unexpectedFileName)
+      }
+      aStr.append("\n")
+    }
+    pw.println(aStr.toString); if (missing.size > 0 || unexpected.size > 0) return true
     hasValidationFailed
   }
 
@@ -349,7 +499,7 @@ object SnappyTestUtils {
       } else {
         pw.println(s"zero results in query $queryNum.")
       }
-      hasValidationFailed = compareFiles(snappyFile, sortedGoldenFile, pw,
+      hasValidationFailed = compareFiles(snappyFile, sortedGoldenFile, pw, queryNum,
         hasValidationFailed)
 
     } catch {
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWQueries.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWQueries.scala
index e2e6eb4360..1c0d2674a0 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWQueries.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWQueries.scala
@@ -851,82 +851,39 @@ object NWQueries {
     "Q23" -> Q23,
     "Q24" -> Q24,
     "Q25" -> Q25,
-    "Q25_1" -> Q25_1,
-    "Q25_2" -> Q25_2,
     "Q26" -> Q26,
-    "Q26_1" -> Q26_1,
-    "Q26_2" -> Q26_2,
     "Q27" -> Q27,
-    "Q27_1" -> Q27_1,
-    "Q27_2" -> Q27_2,
-    "Q27_3" -> Q27_3,
-    "Q27_4" -> Q27_4,
     "Q28" -> Q28,
-    "Q28_1" -> Q28_1,
-    "Q28_2" -> Q28_2,
     "Q29" -> Q29,
-    "Q29_1" -> Q29_1,
-    "Q29_2" -> Q29_2,
     "Q30" -> Q30,
-    "Q30_1" -> Q30_1,
-    "Q30_2" -> Q30_2,
     "Q61" -> Q61,
     "Q62" -> Q62,
     "Q31" -> Q31,
-    "Q31_1" -> Q31_1,
-    "Q31_2" -> Q31_2,
-    "Q31_3" -> Q31_3,
-    "Q31_4" -> Q31_4,
     "Q32" -> Q32,
-    "Q32_1" -> Q32_1,
     "Q33" -> Q33,
-    "Q33_1" -> Q33_1,
     "Q34" -> Q34,
-    "Q34_1" -> Q34_1,
-    "Q34_2" -> Q34_2,
     "Q35" -> Q35,
-    "Q35_1" -> Q35_1,
-    "Q35_2" -> Q35_2,
     "Q36" -> Q36,
-    "Q36_1" -> Q36_1,
-    "Q36_2" -> Q36_2,
     "Q37" -> Q37,
     "Q38" -> Q38,
-    "Q38_1" -> Q38_1,
-    "Q38_2" -> Q38_2,
     "Q39" -> Q39,
     "Q40" -> Q40,
-    "Q40_1" -> Q40_1,
-    "Q40_2" -> Q40_2,
     "Q41" -> Q41,
     "Q42" -> Q42,
-    "Q42_1" -> Q42_1,
-    "Q42_2" -> Q42_2,
     "Q43" -> Q43,
-    "Q43_1" -> Q43_1,
-    "Q43_2" -> Q43_2,
     "Q44" -> Q44,
     "Q45" -> Q45,
     "Q46" -> Q46,
     "Q47" -> Q47,
     "Q48" -> Q48,
     "Q49" -> Q49,
-    "Q49_1" -> Q49_1,
-    "Q49_2" -> Q49_2,
     "Q50" -> Q50,
     "Q51" -> Q51,
-    "Q51_1" -> Q51_1,
-    "Q51_2" -> Q51_2,
     "Q52" -> Q52,
     "Q53" -> Q53,
     "Q54" -> Q54,
     "Q55" -> Q55,
-    "Q55_1" -> Q55_1,
-    "Q55_2" -> Q55_2,
     "Q56" -> Q56,
-    "Q56_1" -> Q56_1,
-    "Q56_2" -> Q56_2,
-    "Q56_3" -> Q56_3,
     "Q57" -> Q57,
     "Q58" -> Q58,
     "Q59" -> Q59,
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
index 0e5750df02..86fd8ab2bb 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
@@ -83,95 +83,95 @@ object NWTestUtil {
           startTime) / 1000) + " seconds")
     }
     for (q <- NWQueries.queries) {
-      var queryExecuted = true;
+      var queryExecuted = false;
       var hasValidationFailed = false;
 
       if (!executeQueriesByChangingConstants) {
         q._1 match {
           case "Q1" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q1, 8, "Q1",
-            pw, sqlContext)
+            pw, sqlContext); queryExecuted = true
           case "Q2" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q2, 91,
-            "Q2", pw, sqlContext)
+            "Q2", pw, sqlContext); queryExecuted = true
           case "Q3" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q3, 830,
-            "Q3", pw, sqlContext)
+            "Q3", pw, sqlContext); queryExecuted = true
           case "Q4" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q4, 9, "Q4",
-            pw, sqlContext)
+            pw, sqlContext); queryExecuted = true
           case "Q5" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q5, 9, "Q5",
-            pw, sqlContext)
+            pw, sqlContext); queryExecuted = true
           case "Q6" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q6, 9, "Q6",
-            pw, sqlContext)
+            pw, sqlContext); queryExecuted = true
           case "Q7" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q7, 9, "Q7",
-            pw, sqlContext)
+            pw, sqlContext); queryExecuted = true
           case "Q8" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q8, 6, "Q8",
-            pw, sqlContext)
+            pw, sqlContext); queryExecuted = true
           case "Q9" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q9, 3, "Q9",
-            pw, sqlContext)
+            pw, sqlContext); queryExecuted = true
           case "Q10" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q10, 2,
-            "Q10", pw, sqlContext)
+            "Q10", pw, sqlContext); queryExecuted = true
           case "Q11" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q11, 4,
-            "Q11", pw, sqlContext)
+            "Q11", pw, sqlContext); queryExecuted = true
           case "Q12" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q12, 2,
-            "Q12", pw, sqlContext)
+            "Q12", pw, sqlContext); queryExecuted = true
           case "Q13" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q13, 2,
-            "Q13", pw, sqlContext)
+            "Q13", pw, sqlContext); queryExecuted = true
           case "Q14" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q14, 69,
-            "Q14", pw, sqlContext)
+            "Q14", pw, sqlContext); queryExecuted = true
           case "Q15" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q15, 5,
-            "Q15", pw, sqlContext)
+            "Q15", pw, sqlContext); queryExecuted = true
           case "Q16" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q16, 8,
-            "Q16", pw, sqlContext)
+            "Q16", pw, sqlContext); queryExecuted = true
           case "Q17" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q17, 3,
-            "Q17", pw, sqlContext)
+            "Q17", pw, sqlContext); queryExecuted = true
           case "Q18" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q18, 9,
-            "Q18", pw, sqlContext)
+            "Q18", pw, sqlContext); queryExecuted = true
           case "Q19" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q19, 13,
-            "Q19", pw, sqlContext)
+            "Q19", pw, sqlContext); queryExecuted = true
           case "Q20" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q20, 1,
-            "Q20", pw, sqlContext)
+            "Q20", pw, sqlContext); queryExecuted = true
           case "Q21" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q21, 1,
-            "Q21", pw, sqlContext)
+            "Q21", pw, sqlContext); queryExecuted = true
           case "Q22" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q22, 1,
-            "Q22", pw, sqlContext)
+            "Q22", pw, sqlContext); queryExecuted = true
           case "Q23" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q23, 1,
-            "Q23", pw, sqlContext)
+            "Q23", pw, sqlContext); queryExecuted = true
           case "Q24" => hasValidationFailed = SnappyTestUtils.assertQuery(snc, NWQueries.Q24, 4,
-            "Q24", pw, sqlContext)
+            "Q24", pw, sqlContext); queryExecuted = true
           case "Q37" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q37, 77,
-            "Q37", pw, sqlContext)
+            "Q37", pw, sqlContext); queryExecuted = true
           case "Q39" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q39, 9,
-            "Q39", pw, sqlContext)
+            "Q39", pw, sqlContext); queryExecuted = true
           case "Q41" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q41, 2155,
-            "Q41", pw, sqlContext)
+            "Q41", pw, sqlContext); queryExecuted = true
           case "Q44" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q44, 830,
-            "Q44", pw, sqlContext) // LeftSemiJoinHash
+            "Q44", pw, sqlContext); queryExecuted = true // LeftSemiJoinHash
           case "Q45" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q45,
-            1788650, "Q45", pw, sqlContext)
+            1788650, "Q45", pw, sqlContext); queryExecuted = true
           case "Q46" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q46,
-            1788650, "Q46", pw, sqlContext)
+            1788650, "Q46", pw, sqlContext); queryExecuted = true
           case "Q47" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q47,
-            1788650, "Q47", pw, sqlContext)
+            1788650, "Q47", pw, sqlContext); queryExecuted = true
           case "Q48" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q48,
-            1788650, "Q48", pw, sqlContext)
+            1788650, "Q48", pw, sqlContext); queryExecuted = true
           case "Q50" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q50,
-            2155, "Q50", pw, sqlContext)
+            2155, "Q50", pw, sqlContext); queryExecuted = true
           case "Q52" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q52,
-            2155, "Q52", pw, sqlContext)
+            2155, "Q52", pw, sqlContext); queryExecuted = true
           case "Q53" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q53,
-            2155, "Q53", pw, sqlContext)
+            2155, "Q53", pw, sqlContext); queryExecuted = true
           case "Q54" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q54,
-            2155, "Q54", pw, sqlContext)
+            2155, "Q54", pw, sqlContext); queryExecuted = true
           case "Q57" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q57,
-            120, "Q57", pw, sqlContext)
+            120, "Q57", pw, sqlContext); queryExecuted = true
           case "Q58" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q58,
-            1, "Q58", pw, sqlContext)
+            1, "Q58", pw, sqlContext); queryExecuted = true
           case "Q59" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q59, 1,
-            "Q59", pw, sqlContext)
+            "Q59", pw, sqlContext); queryExecuted = true
           case "Q60" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q60, 947,
-            "Q60", pw, sqlContext)
+            "Q60", pw, sqlContext); queryExecuted = true
           case "Q61" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q61, 480,
-            "Q61", pw, sqlContext)
+            "Q61", pw, sqlContext); queryExecuted = true
           case "Q62" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q62,
-            480, "Q62", pw, sqlContext)
+            480, "Q62", pw, sqlContext); queryExecuted = true
           case _ => // do nothing
         }
       }
@@ -183,7 +183,7 @@ object NWTestUtil {
               sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q25_2, 1, "Q25_2", pw,
               sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q26" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q26, 86,
           "Q26", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -191,7 +191,7 @@ object NWTestUtil {
               sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q26_2, 60, "Q26_2", pw,
               sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q27" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q27, 9, "Q27",
           pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -203,7 +203,7 @@ object NWTestUtil {
               sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q27_4, 6, "Q27_4", pw,
               sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q28" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q28, 12,
           "Q28", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -211,7 +211,7 @@ object NWTestUtil {
               pw, sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q28_2, 5, "Q28_2",
               pw, sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q29" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q29, 8,
           "Q29", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -219,7 +219,7 @@ object NWTestUtil {
               sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q29_2, 6, "Q29_2", pw,
               sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q30" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q30, 8, "Q30",
           pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -227,7 +227,7 @@ object NWTestUtil {
               sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q30_2, 6, "Q30_2", pw,
               sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q31" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q31, 830,
           "Q31", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -239,19 +239,19 @@ object NWTestUtil {
               sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q31_4, 484, "Q31_4", pw,
               sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q32" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q32, 8, "Q32",
           pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q32_1, 282, "Q32_1", pw,
               sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q33" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q33, 37,
           "Q33", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q33_1, 769, "Q33_1",
               pw, sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q34" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q34, 5,
           "Q34", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -259,7 +259,7 @@ object NWTestUtil {
               pw, sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q34_2, 4, "Q34_2",
               pw, sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q35" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q35, 3, "Q35",
           pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -267,7 +267,7 @@ object NWTestUtil {
               sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q35_2, 3, "Q35_2", pw,
               sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q36" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q36, 290,
           "Q36", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -275,7 +275,7 @@ object NWTestUtil {
               pw, sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q36_2, 61, "Q36_2", pw,
               sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q38" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q38, 2155,
           "Q38", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -283,8 +283,7 @@ object NWTestUtil {
               pw, sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q38_2, 2041, "Q38_2",
               pw, sqlContext, usePlanCaching)
-          }
-
+          }; queryExecuted = true
         case "Q40" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q40, 830,
           "Q40", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -292,7 +291,7 @@ object NWTestUtil {
               sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q40_2, 9, "Q40_2", pw,
               sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q42" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q42, 22,
           "Q42", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -300,7 +299,7 @@ object NWTestUtil {
               pw, sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q42_2, 7, "Q42_2",
               pw, sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q43" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q43, 830,
           "Q43", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -308,7 +307,7 @@ object NWTestUtil {
               pw, sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q43_2, 2, "Q43_2", pw,
               sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q49" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q49, 1788650,
           "Q49", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -316,7 +315,7 @@ object NWTestUtil {
               pw, sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q49_2, 1741240, "Q49_2",
               pw, sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q51" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q51, 2155,
           "Q51", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -324,7 +323,7 @@ object NWTestUtil {
               pw, sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q51_2, 2041, "Q51_2",
               pw, sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q55" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q55, 21,
           "Q55", pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -332,7 +331,7 @@ object NWTestUtil {
               sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q55_2, 6, "Q55_2", pw,
               sqlContext, usePlanCaching)
-          }
+          }; queryExecuted = true
         case "Q56" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56, 8, "Q56",
           pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
@@ -342,13 +341,13 @@ object NWTestUtil {
               sqlContext, usePlanCaching)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56, 8, "Q56_3", pw,
               sqlContext, usePlanCaching)
-          }
-        case _ =>
-          pw.println(s"Did not execute ${q._1}.")
-          queryExecuted = false
+          }; queryExecuted = true
+        case _ => // do nothing
       }
       if (queryExecuted) {
         pw.println(s"Execution completed for query ${q._1}")
+      } else {
+        pw.println(s"Did not execute ${q._1}.")
       }
       if (hasValidationFailed) {
         failedQueries = SnappyTestUtils.addToFailedQueryList(failedQueries, q._1)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsApp.scala
index 64bccddd15..30b2c9057f 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsApp.scala
@@ -18,16 +18,16 @@ package io.snappydata.hydra.northwind
 
 import java.io.{File, FileOutputStream, PrintWriter}
 
+import io.snappydata.hydra.SnappyTestUtils
+
 import org.apache.spark.sql.{SQLContext, SnappyContext}
 import org.apache.spark.{SparkConf, SparkContext}
 
 object ValidateNWQueriesWithChangingConstantsApp {
 
   def main(args: Array[String]) {
-    val connectionURL = args(args.length - 1)
     val conf = new SparkConf().
-        setAppName("ValidateNWQueries Application_" + System.currentTimeMillis()).
-        set("snappydata.connection", connectionURL)
+        setAppName("ValidateNWQueries Application_" + System.currentTimeMillis())
     val sc = SparkContext.getOrCreate(conf)
     val sqlContext = SQLContext.getOrCreate(sc)
     val snc = SnappyContext(sc)
@@ -38,6 +38,9 @@ object ValidateNWQueriesWithChangingConstantsApp {
     NWQueries.dataFilesLocation = dataFilesLocation
     val tableType = args(1)
     val threadID = Thread.currentThread().getId
+    SnappyTestUtils.validateFullResultSet = true
+    SnappyTestUtils.numRowsValidation = true
+    SnappyTestUtils.tableType = tableType
     def getCurrentDirectory = new java.io.File(".").getCanonicalPath
     val outputFile = "ValidateNWQueriesApp_thread_" + threadID + "_" + System.currentTimeMillis +
         ".out"
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsJob.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsJob.scala
index 9e607e0e63..819af80b2b 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsJob.scala
@@ -21,7 +21,7 @@ import java.io.{File, FileOutputStream, PrintWriter}
 import scala.util.{Failure, Success, Try}
 
 import com.typesafe.config.Config
-import io.snappydata.hydra.northwind
+import io.snappydata.hydra.{SnappyTestUtils, northwind}
 
 import org.apache.spark.SparkContext
 import org.apache.spark.sql._
@@ -38,6 +38,9 @@ class ValidateNWQueriesWithChangingConstantsJob extends SnappySQLJob {
     val pw = new PrintWriter(new FileOutputStream(new File(outputFile), true));
     val sc = SparkContext.getOrCreate()
     val sqlContext = SQLContext.getOrCreate(sc)
+    SnappyTestUtils.validateFullResultSet = true
+    SnappyTestUtils.numRowsValidation = true
+    SnappyTestUtils.tableType = tableType
     Try {
       snc.sql("set spark.sql.shuffle.partitions=23")
       val dataFilesLocation = jobConfig.getString("dataFilesLocation")

From 1cde1683f96b18c35739c81bf5477a3062844309 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Wed, 26 Sep 2018 16:13:33 +0530
Subject: [PATCH 15/33] - Minor fixes in the validation code.

---
 .../snappydata/hydra/cluster/SnappyTest.java  |  4 +--
 .../io/snappydata/hydra/SnappyTestUtils.scala | 25 ++++++++-----------
 2 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index e329eee28e..7d0060f25d 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -61,8 +61,8 @@ public class SnappyTest implements Serializable {
   protected static SnappyTest snappyTest;
   protected static HostDescription hd = TestConfig.getInstance().getMasterDescription()
       .getVmDescription().getHostDescription();
-  protected static HostDescription localhd = TestConfig.getInstance().getClientDescription
-      (RemoteTestModule.getMyClientName()).getVmDescription().getHostDescription();
+  /*protected static HostDescription localhd = TestConfig.getInstance().getClientDescription
+      (RemoteTestModule.getMyClientName()).getVmDescription().getHostDescription();*/
   protected static char sep = hd.getFileSep();
   private static String gemfireHome = hd.getGemFireHome() + sep;
   protected static String productDir = gemfireHome + ".." + sep + "snappy" + sep;
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index abcd841a96..714238757c 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -21,9 +21,10 @@ import java.io.{BufferedReader, File, FileNotFoundException, FileReader, IOExcep
 
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.encoders.RowEncoder
-
 import java.util
 
+import scala.io.Source
+
 object SnappyTestUtils {
 
   var validateFullResultSet: Boolean = false;
@@ -384,28 +385,24 @@ object SnappyTestUtils {
     pw.println("Executing command : " + command)
     // get the missing rows in snappy
     executeProcess(pb, missingResultsFile, pw)
-    var unexpectedRsReader: BufferedReader = null
-    var missingRsReader: BufferedReader = null
+    var unexpectedRsReader: Iterator[String] = null
+    var missingRsReader: Iterator[String] = null
     try {
-      unexpectedRsReader = new BufferedReader(new FileReader(unexpectedResultsFile))
-      missingRsReader = new BufferedReader(new FileReader(missingResultsFile))
+      unexpectedRsReader = Source.fromFile(unexpectedResultsFile).getLines()
+      missingRsReader = Source.fromFile(missingResultsFile).getLines()
     } catch {
       case fe: FileNotFoundException =>
         pw.println("Could not find file to compare results.", fe)
         false
     }
-    var line: String = null
+    var line: String = ""
     val unexpected = new util.ArrayList[String]
     val missing = new util.ArrayList[String]
     try {
-      while ( {
-        (line = unexpectedRsReader.readLine) != null
-      }) unexpected.add("\n  " + line)
-      while ( {
-        (line = missingRsReader.readLine) != null
-      }) missing.add("\n  " + line)
-      unexpectedRsReader.close()
-      missingRsReader.close()
+      while (unexpectedRsReader.hasNext)
+        unexpected.add("\n  " + unexpectedRsReader.next())
+      while (missingRsReader.hasNext)
+        missing.add("\n  " + missingRsReader.next())
     } catch {
       case ie: IOException =>
         pw.println("Got exception while reading resultset files", ie)

From fe2f2b2b2279703f462a162d91abc7dcb613023a Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Mon, 29 Oct 2018 16:34:01 +0530
Subject: [PATCH 16/33] - test changes to have thread dump only on test failure
 or hang. - fixing minor issue with data validation in case of snappyjob and
 spark app. - Adding method for finding suspectStrings.

---
 .../snappydata/hydra/cluster/SnappyPrms.java  |  32 +++++
 .../snappydata/hydra/cluster/SnappyTest.java  | 129 +++++++++++-------
 .../hydra/cluster/expectedExceptionList.inc   |  30 ++++
 .../hydra/northwind/startDualModeCluster.conf |   2 +
 .../hydra/northwind/stopDualModeCluster.conf  |   3 +
 .../io/snappydata/hydra/SnappyTestUtils.scala |  84 +++++++-----
 6 files changed, 196 insertions(+), 84 deletions(-)
 create mode 100644 dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
index 161e219c65..758ef76c42 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
@@ -622,6 +622,38 @@ public static int getNumOfStackDumpItrs(){
     return tasktab().intAt(key, tab().intAt(key, 3));
   }
 
+  /**
+   * Parameter used to get the List of expected exceptions in the test run.
+   * Array of values for expected exceptions
+   */
+  public static Long expectedExceptions;
+
+  public static String[] getExpectedExceptionList() {
+    Long key = expectedExceptions;
+    Vector statements = tab().vecAt(key, new HydraVector());
+    String[] strArr = new String[statements.size()];
+    for (int i = 0; i < statements.size(); i++) {
+      strArr[i] = (String)statements.elementAt(i);
+    }
+    return strArr;
+  }
+
+  /**
+   * Parameter used to get the List of expected exceptions in the test run in case of HA.
+   * Array of values for expected exceptions
+   */
+  public static Long expectedExceptionsForHA;
+
+  public static String[] getExpectedExceptionListForHA() {
+    Long key = expectedExceptionsForHA;
+    Vector statements = tab().vecAt(key, new HydraVector());
+    String[] strArr = new String[statements.size()];
+    for (int i = 0; i < statements.size(); i++) {
+      strArr[i] = (String)statements.elementAt(i);
+    }
+    return strArr;
+  }
+
   /**
    * Sleep time in secs between 2 thread dumps.
    */
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index 8bfe695443..e210831d1c 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -1816,17 +1816,6 @@ public int executeProcess(ProcessBuilder pb, File logFile) {
     }
   }
 
-  public static String getCurrentDirPath(){
-    String currentDir;
-    try {
-      currentDir = new File(".").getCanonicalPath();
-    } catch (IOException e) {
-      String s = "Problem while accessing the current dir.";
-      throw new TestException(s, e);
-    }
-    return currentDir;
-  }
-
   protected synchronized void recordSnappyProcessIDinNukeRun(String pName) {
     Process pr = null;
     try {
@@ -2767,26 +2756,66 @@ public static synchronized void HydraTask_createAndStartSnappyLocator() {
     }
   }
 
-  /*Dump stacks for threads in members of snappy cluster*/
+  /*
+  * Dump stacks for threads in members of snappy cluster
+  */
+
   public static synchronized void HydraTask_dumpStacks() {
-    initSnappyArtifacts();
-    int dumpItr = SnappyPrms.getNumOfStackDumpItrs();
-    for (int i = 0; i < dumpItr; i++) {
-      snappyTest.dumpStacks();
-      if (i < (dumpItr-1)) {
-        sleepForMs(SnappyPrms.getSleepBtwnStackDumps());
+    snappyTest.dumpStacks();
+  }
+
+  public void dumpStacks() {
+    File currDir = new File(".");
+    boolean checkErrors = new File(currDir, "errors.txt").exists();
+    boolean checkHang = new File(currDir, "hang.txt").exists();
+    if (checkErrors || checkHang) {
+      int dumpItr = SnappyPrms.getNumOfStackDumpItrs();
+      Set pids = getPidList();
+      for (int i = 0; i < dumpItr; i++) {
+        getThreadDump(pids);
+        if (i < (dumpItr - 1)) {
+          sleepForMs(SnappyPrms.getSleepBtwnStackDumps());
+        }
       }
+    } else {
+      Log.getLogWriter().info("Test has no failures. Hence no need to take process stack dumps.");
     }
   }
 
-  public void dumpStacks() {
-    Set pids = getPidList();
-    Iterator itr = pids.iterator();
-    while(itr.hasNext()) {
-      String val = (String)itr.next();
-      int pid = Integer.parseInt(val);
-      String host = getPidHost(val);
-      ProcessMgr.printProcessStacks(host,pid);
+  /*
+  * Check if there are any suspect strings in the test logs. To be executed at the end of the test.
+  */
+  public static synchronized void HydraTask_checkSuspectStrings() {
+    snappyTest.checkSuspectStrings();
+  }
+
+  public void checkSuspectStrings() {
+    Process pr = null;
+    String checkSuspectOutPut = getCurrentDirPath() + File.separator + "suspectStrings.txt";
+    File suspectStringFile = new File(checkSuspectOutPut);
+    StringBuilder cmd = new StringBuilder();
+    cmd.append("grep -r Exception " + getCurrentDirPath()).append(" | grep .log");
+    String[] expectedExceptions = SnappyPrms.getExpectedExceptionList();
+    if(cycleVms) {
+      // Arrays.asList returns a fixed-size list (addAll would throw), so copy into a
+      // mutable ArrayList; use the typed toArray overload to avoid a ClassCastException.
+      List<String> exceptions = new ArrayList<>(Arrays.asList(expectedExceptions));
+      exceptions.addAll(Arrays.asList(SnappyPrms.getExpectedExceptionListForHA()));
+      expectedExceptions = exceptions.toArray(new String[0]);
+    }
+    for (int i = 0; i < expectedExceptions.length; i++) {
+      cmd.append(" | grep -v -F \"").append(expectedExceptions[i]).append("\"");
+    }
+    Log.getLogWriter().info("grep command is : " + cmd);
+    ProcessBuilder pb = new ProcessBuilder("/bin/bash", "-c", cmd.toString());
+    pb.redirectErrorStream(true);
+    pb.redirectOutput(ProcessBuilder.Redirect.appendTo(suspectStringFile));
+    try {
+      pr = pb.start();
+      pr.waitFor();
+    } catch (IOException ie) {
+      Log.getLogWriter().info("Got IOException while grepping for suspect strings: " + ie.getMessage());
+    } catch (InterruptedException ie) {
+      Log.getLogWriter().info("Interrupted while grepping for suspect strings: " + ie.getMessage());
+    }
   }
 
@@ -3238,30 +3267,20 @@ protected void killVM(String vmDir, String clientName, String vmName) {
     Log.getLogWriter().info(clientName + " stopped successfully...");
   }
 
-  protected static void threadDumpForAllServers() {
-    Set<String> pidList;
+  protected void getThreadDump(Set<String> pidList) {
+    String thrDumpScript = getCurrentDirPath() + File.separator + "threadDump.sh";
     Process pr = null;
     ProcessBuilder pb;
-    File logFile, log = null, serverHeapDumpOutput;
+    File logFile = new File(thrDumpScript);
+    String thrDumpLog = getCurrentDirPath() + File.separator + "threadDump.log";
+    File thrDumpOutput = new File(thrDumpLog);
     try {
-      HostDescription hd = TestConfig.getInstance().getMasterDescription()
-          .getVmDescription().getHostDescription();
-      pidList = SnappyStartUpTest.getServerPidList();
-      pidList.addAll(SnappyStartUpTest.getLeaderPidList());
-      log = new File(".");
-      String server = log.getCanonicalPath() + File.separator + "threadDumpAllServers.sh";
-      logFile = new File(server);
-      String serverKillLog = log.getCanonicalPath() + File.separator +
-          "serversThreadDumpStopFailure.log";
-      serverHeapDumpOutput = new File(serverKillLog);
       FileWriter fw = new FileWriter(logFile.getAbsoluteFile(), true);
       BufferedWriter bw = new BufferedWriter(fw);
-      List asList = new ArrayList(pidList);
-      Collections.shuffle(asList);
       for (String pidString : pidList) {
         int pid = Integer.parseInt(pidString);
-        String pidHost = snappyTest.getPidHost(Integer.toString(pid));
-        if (pidHost.equalsIgnoreCase("localhost")) {
+        String host = snappyTest.getPidHost(Integer.toString(pid));
+        if (host.equalsIgnoreCase("localhost")) {
           bw.write("kill -23 " + pid);
           bw.newLine();
           bw.write("sleep 4;");
@@ -3269,31 +3288,39 @@ protected static void threadDumpForAllServers() {
           bw.write("kill -3 " + pid);
         } else {
           bw.write("ssh -n -x -o PasswordAuthentication=no -o StrictHostKeyChecking=no " +
-              pidHost + " kill -23 " + pid);
+              host + " kill -23 " + pid);
           bw.newLine();
           bw.write("sleep 4;");
           bw.newLine();
           bw.write("ssh -n -x -o PasswordAuthentication=no -o StrictHostKeyChecking=no " +
-              pidHost + " kill -3 " + pid);
+              host + " kill -3 " + pid);
         }
         bw.newLine();
       }
       bw.close();
       fw.close();
       logFile.setExecutable(true);
-      pb = new ProcessBuilder(server);
+      pb = new ProcessBuilder(thrDumpScript);
       pb.redirectErrorStream(true);
-      pb.redirectOutput(ProcessBuilder.Redirect.appendTo(serverHeapDumpOutput));
+      pb.redirectOutput(ProcessBuilder.Redirect.appendTo(thrDumpOutput));
       pr = pb.start();
       pr.waitFor();
-    } catch (IOException e) {
-      throw new TestException("IOException occurred while retriving logFile path " + log + "\nError Message:" + e.getMessage());
-    } catch (InterruptedException e) {
+    } catch (IOException ie) {
+      throw new TestException("IOException occurred while retrieving logFile path.\nError " +
+          "Message:" + ie.getMessage());
+    } catch (InterruptedException ie) {
       String s = "Exception occurred while waiting for the process execution : " + pr;
-      throw new TestException(s, e);
+      throw new TestException(s, ie);
     }
   }
 
+  protected static void threadDumpForAllServers() {
+    Set<String> pidList;
+    pidList = SnappyStartUpTest.getServerPidList();
+    pidList.addAll(SnappyStartUpTest.getLeaderPidList());
+    snappyTest.getThreadDump(pidList);
+  }
+
   protected boolean waitForMemberStop(String vmDir, String clientName, String vmName) {
     File commandOutput;
     try {
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc b/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc
new file mode 100644
index 0000000000..22ceb91a4c
--- /dev/null
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc
@@ -0,0 +1,30 @@
+io.snappydata.hydra.cluster.SnappyPrms-expectedExceptions =
+"java.net.BindException"
+"NoSuchObjectException"
+"NucleusObjectNotFoundException"
+"RegionDestroyedException"
+"DistributedSystemDisconnectedException"
+"newDisconnectedException"
+"CacheClosedException"
+"java.io.FileNotFoundException"
+"org.apache.spark.shuffle.FetchFailedException"
+"org.glassfish.jersey.server.internal.MappableExceptionWrapperInterceptor"
+"java.lang.reflect.InvocationTargetException"
+"org.apache.spark.storage.ShuffleBlockFetcherIterator.throwFetchFailedException"
+"org.apache.spark.SparkException: Exception thrown in awaitResult"
+"com.gemstone.gemfire.distributed.LockServiceDestroyedException"
+"Failed to retrieve information for"
+;
+
+io.snappydata.hydra.cluster.SnappyPrms-expectedExceptionsForHA =
+""
+""
+
+;
+
+/*
+io.snappydata.hydra.cluster.SnappyPrms-suspectStrings =
+"java.lang.IllegalStateException"
+"NullPointerException"
+;
+*/
\ No newline at end of file
diff --git a/dtests/src/test/java/io/snappydata/hydra/northwind/startDualModeCluster.conf b/dtests/src/test/java/io/snappydata/hydra/northwind/startDualModeCluster.conf
index 08e49b37fa..4344c510ba 100644
--- a/dtests/src/test/java/io/snappydata/hydra/northwind/startDualModeCluster.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/northwind/startDualModeCluster.conf
@@ -88,3 +88,5 @@ INITTASK    taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = H
 INITTASK    taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_recordProcessIDWithHost
             runMode = always
             threadGroups = snappyThreads, locatorThreads, snappyStoreThreads, leadThreads, workerThreads;
+
+INCLUDE $JTESTS/io/snappydata/hydra/cluster/expectedExceptionList.inc;
\ No newline at end of file
diff --git a/dtests/src/test/java/io/snappydata/hydra/northwind/stopDualModeCluster.conf b/dtests/src/test/java/io/snappydata/hydra/northwind/stopDualModeCluster.conf
index fc130d4fae..45aeb69631 100644
--- a/dtests/src/test/java/io/snappydata/hydra/northwind/stopDualModeCluster.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/northwind/stopDualModeCluster.conf
@@ -16,6 +16,9 @@ ENDTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = H
 ENDTASK     taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_cleanUpSnappyProcessesOnFailure
             clientNames = locator1;
 
+ENDTASK     taskClass = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_checkSuspectStrings
+            clientNames = locator1;
+
 hydra.Prms-totalTaskTimeSec           = 100;
 hydra.Prms-maxResultWaitSec           = 3600;
 
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index 714238757c..135bf0e2b6 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -17,14 +17,15 @@
 
 package io.snappydata.hydra
 
-import java.io.{BufferedReader, File, FileNotFoundException, FileReader, IOException, PrintWriter}
-
-import org.apache.spark.sql._
-import org.apache.spark.sql.catalyst.encoders.RowEncoder
+import java.io.{File, FileNotFoundException, IOException, PrintWriter}
 import java.util
+import java.util.Collections
 
 import scala.io.Source
 
+import org.apache.spark.sql._
+import org.apache.spark.sql.catalyst.encoders.RowEncoder
+
 object SnappyTestUtils {
 
   var validateFullResultSet: Boolean = false;
@@ -246,27 +247,32 @@ object SnappyTestUtils {
    In case of round-off, there is a difference of .1 in snappy and spark results. We can ignore
    such differences
    */
-  def isIgnorable(actualLine: String, expectedLine: String): Boolean = {
-    var canBeIgnored = false
-    if ((actualLine != null && actualLine.size > 0) && (expectedLine != null && expectedLine.size
-     > 0)) {
-      val actualArray = actualLine.split(",")
-      val expectedArray = expectedLine.split(",")
+  def isIgnorable(actualRow: String, expectedRow: String): Boolean = {
+    var isIgnorable = false
+    if (actualRow != null && actualRow.size > 0 && expectedRow != null && expectedRow.size > 0) {
+      val actualArray = actualRow.split(",")
+      val expectedArray = expectedRow.split(",")
       var diff: Double = 0.0
-      if(actualArray.length != expectedArray.length){
-        canBeIgnored = false
-      } else {
+      if(actualArray.length == expectedArray.length){
         for (i <- 0 to actualArray.length) {
-          val value1 = actualArray(i).toDouble
-          val value2 = expectedArray(i).toDouble
-          if(value1 > value2) diff = value1.-(value2).doubleValue
-          else diff = value2.-(value1).doubleValue
-          println("diff is " + diff)
-          if (diff <= 0.01) canBeIgnored = true
+          val value1: String = actualArray(i)
+          val value2: String = expectedArray(i)
+          if (!value1.equals(value2)) {
+            try {
+              val val1: Double = value1.toDouble
+              val val2: Double = value2.toDouble
+              if (val1 > val2) diff = val1.-(val2).doubleValue
+              else diff = val2.-(val1).doubleValue
+              println("diff is " + diff)
+              if (diff <= 0.01) isIgnorable = true
+            } catch {
+              case nfe: NumberFormatException => return false
+            }
+          }
         }
       }
     }
-    return canBeIgnored
+    isIgnorable
   }
 
   def executeProcess(pb: ProcessBuilder, logFile: File, pw: PrintWriter): Int = {
@@ -313,8 +319,7 @@ object SnappyTestUtils {
     val actualFile = snappyFile.listFiles.filter(_.getName.endsWith(".csv"))
 
     hasValidationFailed = compareFiles(getQueryResultDir("snappyQueryFiles"), actualFile.iterator
-        .next().getAbsolutePath, expectedFile.iterator.next().getAbsolutePath,
-      pw, queryNum, hasValidationFailed)
+        .next().getAbsolutePath, expectedFile.iterator.next().getAbsolutePath, pw, queryNum)
 
     /*
     val expectedLineSet = Source.fromFile(expectedFile.iterator.next()).getLines()
@@ -355,7 +360,7 @@ object SnappyTestUtils {
   }
 
   def compareFiles(dir: String, snappyResultsFile: String, sparkResultsFile: String,
-      pw: PrintWriter, queryNum: String, hasValidationFailed: Boolean): Boolean = {
+      pw: PrintWriter, queryNum: String): Boolean = {
     val aStr = new StringBuilder
     var pb: ProcessBuilder = null
     var command: String = null
@@ -393,9 +398,8 @@ object SnappyTestUtils {
     } catch {
       case fe: FileNotFoundException =>
         pw.println("Could not find file to compare results.", fe)
-        false
+        return true
     }
-    var line: String = ""
     val unexpected = new util.ArrayList[String]
     val missing = new util.ArrayList[String]
     try {
@@ -407,31 +411,45 @@ object SnappyTestUtils {
       case ie: IOException =>
         pw.println("Got exception while reading resultset files", ie)
     }
+
     if (missing.size > 0) {
       if (missing.size < 20) {
-        aStr.append("\nThe following " + missing.size + " rows are missing from snappy resultset:")
+        aStr.append(s"\nThe following ${missing.size} rows are missing from snappy resultset: \n")
         aStr.append(missing.toString)
       }
       else {
-        aStr.append("There are " + missing.size + " rows missing in snappy for " + queryNum + "." +
-            " " + "Please check " + missingFileName)
+        aStr.append(s"There are ${missing.size} rows missing from snappy for $queryNum. Please " +
+            s"check $missingFileName")
       }
       aStr.append("\n")
     }
     if (unexpected.size > 0) {
       if (unexpected.size < 20) {
-        aStr.append("\nThe following " + unexpected.size +
-            " rows from snappy resultset are unexpected: ")
+        aStr.append(s"\nThe following ${unexpected.size} rows are unexpected in snappy " +
+            s"resultset:\n")
         aStr.append(unexpected.toString)
       }
       else {
-        aStr.append("There are " + unexpected.size + " rows unexpected in snappy for " + queryNum +
-            ". Please check " + unexpectedFileName)
+        aStr.append(s"There are ${unexpected.size} rows unexpected in snappy for $queryNum. " +
+            s"Please check $unexpectedFileName")
       }
       aStr.append("\n")
     }
     pw.println(aStr.toString)
-    hasValidationFailed
+
+    if(missing.size() == unexpected.size()) {
+      Collections.sort(missing)
+      Collections.sort(unexpected)
+      for (i <- missing.size()) {
+        if (!isIgnorable(missing.get(i), unexpected.get(i))) true
+      }
+      aStr.setLength(0) // data mismatch can be ignored
+    }
+    if (aStr.length() > 0) {
+      true
+    } else {
+      false
+    }
   }
 
   /*

From 57e82fbe12b9d2ded01509ffe5480a715422a2f1 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Tue, 30 Oct 2018 15:14:27 +0530
Subject: [PATCH 17/33] - Adding test flag to prevent hydra from stopping the
 snappy and spark clusters. - Minor test fixes.

---
 .../io/snappydata/hydra/cluster/SnappyPrms.java     | 13 +++++++++++--
 .../io/snappydata/hydra/cluster/SnappyTest.java     | 10 ++++++++++
 .../hydra/cluster/expectedExceptionList.inc         |  5 ++---
 .../scala/io/snappydata/hydra/SnappyTestUtils.scala |  2 +-
 4 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
index 758ef76c42..74c2c1106a 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
@@ -655,7 +655,7 @@ public static String[] getExpectedExceptionListForHA() {
   }
 
   /**
-   * Sleep time in secs between 2 thread dumps.
+   * (int)Sleep time in secs between 2 thread dumps.
    */
   public static Long sleepBtwnStackDump;
 
@@ -664,12 +664,21 @@ public static int getSleepBtwnStackDumps(){
     return tasktab().intAt(key, tab().intAt(key, 5));
   }
 
+  /**
+   * (boolean) Flag to notify on whether to keep the clusters running after hydra tasks are done.
+   */
+  public static Long keepClusterRunning;
+
+  public static boolean isKeepClusterRunning(){
+    Long key = keepClusterRunning;
+    return tasktab().booleanAt(key, tab().booleanAt(key, false));
+  }
+
   public static String getCredentialFile() {
     Long key = credentialFile;
     return BasePrms.tasktab().stringAt(key, BasePrms.tab().stringAt(key, null));
   }
 
-
   public static boolean isSecurityOn() {
     Long key = isSecurity;
     return tasktab().booleanAt(key, tab().booleanAt(key, false));
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index e210831d1c..5d35c3c040 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -1916,6 +1916,9 @@ public static String getCurrentDirPath() {
    * Task(ENDTASK) for cleaning up snappy processes, because they are not stopped by Hydra in case of Test failure.
    */
   public static void HydraTask_cleanUpSnappyProcessesOnFailure() {
+    if(SnappyPrms.isKeepClusterRunning()) {
+      return;
+    }
     Process pr = null;
     ProcessBuilder pb = null;
     File logFile = null, log = null, nukeRunOutput = null;
@@ -2866,6 +2869,9 @@ public static synchronized void HydraTask_startSparkCluster() {
    * Stops Spark Cluster.
    */
   public static synchronized void HydraTask_stopSparkCluster() {
+    if (SnappyPrms.isKeepClusterRunning()) {
+      return;
+    }
     File log = null;
     try {
       initSnappyArtifacts();
@@ -2879,6 +2885,7 @@ public static synchronized void HydraTask_stopSparkCluster() {
       String s = "problem occurred while retriving destination logFile path " + log;
       throw new TestException(s, e);
     }
+
   }
 
   /**
@@ -2947,6 +2954,9 @@ public static synchronized void HydraTask_stopSnappyLocator() {
   }
 
   public static synchronized void HydraTask_stopSnappyCluster() {
+    if (SnappyPrms.isKeepClusterRunning()) {
+      return;
+    }
     File log = null;
     try {
       initSnappyArtifacts();
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc b/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc
index 22ceb91a4c..74b4d8de60 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc
@@ -17,9 +17,8 @@ io.snappydata.hydra.cluster.SnappyPrms-expectedExceptions =
 ;
 
 io.snappydata.hydra.cluster.SnappyPrms-expectedExceptionsForHA =
-""
-""
-
+"java.lang.reflect.InvocationTargetException"
+" java.lang.IllegalStateException: Cannot call methods on a stopped SparkContext"
 ;
 
 /*
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index 135bf0e2b6..2fef9c3e54 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -440,7 +440,7 @@ object SnappyTestUtils {
     if(missing.size() == unexpected.size()) {
       Collections.sort(missing)
       Collections.sort(unexpected)
-      for (i <- missing.size()) {
+      for (i <- 0 until missing.size()) {
         if (!isIgnorable(missing.get(i), unexpected.get(i))) true
       }
       aStr.setLength(0) // data mismatch can be ignored

From 6215825315ccd84f4a1624875c28046e489884b1 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Fri, 2 Nov 2018 11:36:17 +0530
Subject: [PATCH 18/33] - Fixing issue with return for grep command process
 output.

---
 .../src/test/java/io/snappydata/hydra/cluster/SnappyTest.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index 5d35c3c040..c851b65d92 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -27,7 +27,6 @@
 import org.apache.commons.io.filefilter.IOFileFilter;
 import org.apache.commons.io.filefilter.TrueFileFilter;
 import org.apache.commons.io.filefilter.WildcardFileFilter;
-import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.spark.SparkContext;
 import org.apache.spark.sql.SnappyContext;
@@ -1800,7 +1799,8 @@ public int executeProcess(ProcessBuilder pb, File logFile) {
         assert p.getInputStream().read() == -1;
       }
       int rc = p.waitFor();
-      if ((rc == 0) || (pb.command().contains("grep") && rc == 1)) {
+      String pbCmd = Arrays.toString(pb.command().toArray());
+      if ((rc == 0) || (pbCmd.contains("grep -v -F") && rc == 1)) {
         Log.getLogWriter().info("Executed successfully");
         return 0;
       } else {

From 151f872a5f16f7d4ae98b3cfe8712ccc6d39e185 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Tue, 13 Nov 2018 10:27:01 +0530
Subject: [PATCH 19/33] - Changes to make hydra TPCH test work after changes
 in the perf framework. - Minor test fixes

---
 .../snappydata/hydra/cluster/SnappyTest.java  |  3 ++
 .../io/snappydata/hydra/distJoin/distJoin.bt  | 42 +++++++++++++------
 .../hydra/distJoin/distJoinWithLeadHA.conf    | 16 +++----
 .../hydra/distJoin/distJoinWithServerHA.conf  | 16 +++----
 .../io/snappydata/hydra/SnappyTestUtils.scala | 10 +++--
 5 files changed, 54 insertions(+), 33 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index bf8a811665..d67145003b 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -2764,6 +2764,9 @@ public static synchronized void HydraTask_createAndStartSnappyLocator() {
   */
 
   public static synchronized void HydraTask_dumpStacks() {
+    snappyTest = new SnappyTest();
+    snappyTest.getClientHostDescription();
+    int tid = RemoteTestModule.getCurrentThread().getThreadId();
     snappyTest.dumpStacks();
   }
 
diff --git a/dtests/src/test/java/io/snappydata/hydra/distJoin/distJoin.bt b/dtests/src/test/java/io/snappydata/hydra/distJoin/distJoin.bt
index 3ff1911734..0d9e11f3c9 100644
--- a/dtests/src/test/java/io/snappydata/hydra/distJoin/distJoin.bt
+++ b/dtests/src/test/java/io/snappydata/hydra/distJoin/distJoin.bt
@@ -2,32 +2,48 @@ io/snappydata/hydra/distJoin/distJoinWithLeadHA.conf
   A=snappyStore snappyStoreHosts=2 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=1 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  dataLocation="$GEMFIRE/../../../tests/common/src/main/resources/TPCH"
+  buckets_Order_Lineitem=128
+  buckets_Cust_Part_PartSupp=128
+  isSupplierColumnTable=true
+  buckets_Supplier=128
   redundantCopies=1
-  buckets_Order_Lineitem=29
-  buckets_Cust_Part_PartSupp=7
+  persistence=true
+  persistence_type="SYNCHRONOUS"
+  numberOfLoadStages=1
+  isParquet=false
+  createParquet=false
+  traceEvents=false
   queries="1-3-4-5-6-7-8-10-11-12-13-14-15-16-17-18-19-20-22"
   queryPlan=false
-  shufflePartitions=4
-  inMemoryColumnarStorageCompressed=false
-  dataLocation="$GEMFIRE/../../../tests/common/src/main/resources/TPCH"
-  useIndex=true
   resultCollection=true
-  warmUpIterations=5
+  warmUps=5
   actualRuns=5
+  isDynamic=true
+  resultCollection=false
+  randomSeed=42
 
 io/snappydata/hydra/distJoin/distJoinWithServerHA.conf
   A=snappyStore snappyStoreHosts=2 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
   B=lead leadHosts=2 leadVMsPerHost=1 leadThreadsPerVM=2
   C=locator locatorHosts=1 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  dataLocation="$GEMFIRE/../../../tests/common/src/main/resources/TPCH"
+  buckets_Order_Lineitem=128
+  buckets_Cust_Part_PartSupp=128
+  isSupplierColumnTable=true
+  buckets_Supplier=128
   redundantCopies=1
-  buckets_Order_Lineitem=29
-  buckets_Cust_Part_PartSupp=7
+  persistence=true
+  persistence_type="SYNCHRONOUS"
+  numberOfLoadStages=1
+  isParquet=false
+  createParquet=false
+  traceEvents=false
   queries="1-3-4-5-6-7-8-10-11-12-13-14-15-16-17-18-19-20-22"
   queryPlan=false
-  shufflePartitions=4
-  inMemoryColumnarStorageCompressed=false
-  dataLocation="$GEMFIRE/../../../tests/common/src/main/resources/TPCH"
-  useIndex=true
   resultCollection=true
   warmUpIterations=5
   actualRuns=5
+  isDynamic=true
+  resultCollection=false
+  randomSeed=42
diff --git a/dtests/src/test/java/io/snappydata/hydra/distJoin/distJoinWithLeadHA.conf b/dtests/src/test/java/io/snappydata/hydra/distJoin/distJoinWithLeadHA.conf
index 9af1b6f238..a83199e30c 100644
--- a/dtests/src/test/java/io/snappydata/hydra/distJoin/distJoinWithLeadHA.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/distJoin/distJoinWithLeadHA.conf
@@ -89,25 +89,25 @@ INITTASK    taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = b
             threadGroups = snappyThreads;
 
 INITTASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
-           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.TPCH_Snappy_Tables
+           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.tpch.TableCreationJob
            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer =
-           "dataLocation=${dataLocation},Buckets_Order_Lineitem=${buckets_Order_Lineitem},Buckets_Cust_Part_PartSupp=${buckets_Cust_Part_PartSupp},useIndex=${useIndex}"
+           "dataLocation=$dataLocation,Buckets_Order_Lineitem=$buckets_Order_Lineitem,Buckets_Cust_Part_PartSupp=$buckets_Cust_Part_PartSupp,IsSupplierColumnTable=$isSupplierColumnTable,Buckets_Supplier=$buckets_Supplier,Redundancy=$redundantCopies,Persistence=$persistence,Persistence_Type=$persistence_type,numberOfLoadStages=$numberOfLoadStages,isParquet=$isParquet,createParquet=$createParquet,traceEvents=$traceEvents"
            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-cluster*tests.jar
            threadGroups = snappyThreads
            ;
 
 INITTASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
-           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.TPCH_Snappy_Query
+           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.tpch.QueryExecutionJob
            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer =
-           "queries=${queries},queryPlan=${queryPlan},spark.sql.shuffle.partitions=${shufflePartitions},spark.sql.inMemoryColumnarStorage.compressed=${inMemoryColumnarStorageCompressed},useIndex=${useIndex},resultCollection=${resultCollection},warmUpIterations=${warmUpIterations},actualRuns=${actualRuns}"
+           "queries=$queries,isDynamic=$isDynamic,resultCollection=$resultCollection,warmUpIterations=$warmUps,actualRuns=$actualRuns,threadNumber=1,traceEvents=$traceEvents,randomSeed=$randomSeed"
            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappy-cluster*tests.jar
            threadGroups = snappyThreads
            ;
 
 TASK       taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
-           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.TPCH_Snappy_Query
+           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.tpch.QueryExecutionJob
            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer =
-           "queries=${queries},queryPlan=${queryPlan},spark.sql.shuffle.partitions=${shufflePartitions},spark.sql.inMemoryColumnarStorage.compressed=${inMemoryColumnarStorageCompressed},useIndex=${useIndex},resultCollection=${resultCollection},warmUpIterations=${warmUpIterations},actualRuns=${actualRuns}"
+           "queries=$queries,isDynamic=$isDynamic,resultCollection=$resultCollection,warmUpIterations=$warmUps,actualRuns=$actualRuns,threadNumber=1,traceEvents=$traceEvents,randomSeed=$randomSeed"
            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-cluster*tests.jar
            threadGroups = snappyThreads
            weight = 60
@@ -115,9 +115,9 @@ TASK       taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = Hy
            ;
 
 TASK       taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
-           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.TPCH_Snappy_Query
+           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.tpch.QueryExecutionJob
            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer =
-           "queries=11-12-13-14-15-16-17,queryPlan=${queryPlan},spark.sql.shuffle.partitions=${shufflePartitions},spark.sql.inMemoryColumnarStorage.compressed=${inMemoryColumnarStorageCompressed},useIndex=${useIndex},resultCollection=${resultCollection},warmUpIterations=${warmUpIterations},actualRuns=${actualRuns}"
+           "queries=11-12-13-14-15-16-17,isDynamic=$isDynamic,resultCollection=$resultCollection,warmUpIterations=$warmUps,actualRuns=$actualRuns,threadNumber=1,traceEvents=$traceEvents,randomSeed=$randomSeed"
            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-cluster*tests.jar
            threadGroups = leadThreads
            maxThreads = 1
diff --git a/dtests/src/test/java/io/snappydata/hydra/distJoin/distJoinWithServerHA.conf b/dtests/src/test/java/io/snappydata/hydra/distJoin/distJoinWithServerHA.conf
index aab85ef05b..e16e17da77 100644
--- a/dtests/src/test/java/io/snappydata/hydra/distJoin/distJoinWithServerHA.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/distJoin/distJoinWithServerHA.conf
@@ -89,26 +89,26 @@ INITTASK    taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = b
             threadGroups = snappyThreads;
 
 INITTASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
-           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.TPCH_Snappy_Tables
+           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.tpch.TableCreationJob
            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer =
-           "dataLocation=${dataLocation},Buckets_Order_Lineitem=${buckets_Order_Lineitem},Buckets_Cust_Part_PartSupp=${buckets_Cust_Part_PartSupp},useIndex=${useIndex}"
+           "dataLocation=$dataLocation,Buckets_Order_Lineitem=$buckets_Order_Lineitem,Buckets_Cust_Part_PartSupp=$buckets_Cust_Part_PartSupp,IsSupplierColumnTable=$isSupplierColumnTable,Buckets_Supplier=$buckets_Supplier,Redundancy=$redundantCopies,Persistence=$persistence,Persistence_Type=$persistence_type,numberOfLoadStages=$numberOfLoadStages,isParquet=$isParquet,createParquet=$createParquet,traceEvents=$traceEvents"
            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-cluster*tests.jar
            threadGroups = snappyThreads
            ;
 
 
 INITTASK   taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
-           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.TPCH_Snappy_Query
+           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.tpch.QueryExecutionJob
            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer =
-           "queries=${queries},queryPlan=${queryPlan},spark.sql.shuffle.partitions=${shufflePartitions},spark.sql.inMemoryColumnarStorage.compressed=${inMemoryColumnarStorageCompressed},useIndex=${useIndex},resultCollection=${resultCollection},warmUpIterations=${warmUpIterations},actualRuns=${actualRuns}"
+           "queries=$queries,isDynamic=$isDynamic,resultCollection=$resultCollection,warmUpIterations=$warmUps,actualRuns=$actualRuns,threadNumber=1,traceEvents=$traceEvents,randomSeed=$randomSeed"
            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-cluster*tests.jar
            threadGroups = snappyThreads
            ;
 
 TASK       taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
-           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.TPCH_Snappy_Query
+           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.tpch.QueryExecutionJob
            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer =
-           "queries=${queries},queryPlan=${queryPlan},spark.sql.shuffle.partitions=${shufflePartitions},spark.sql.inMemoryColumnarStorage.compressed=${inMemoryColumnarStorageCompressed},useIndex=${useIndex},resultCollection=${resultCollection},warmUpIterations=${warmUpIterations},actualRuns=${actualRuns}"
+           "queries=$queries,isDynamic=$isDynamic,resultCollection=$resultCollection,warmUpIterations=$warmUps,actualRuns=$actualRuns,threadNumber=1,traceEvents=$traceEvents,randomSeed=$randomSeed"
            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-cluster*tests.jar
            threadGroups = snappyThreads
            weight = 60
@@ -116,9 +116,9 @@ TASK       taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = Hy
            ;
 
 TASK       taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
-           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.TPCH_Snappy_Query
+           io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.benchmark.snappy.tpch.QueryExecutionJob
            io.snappydata.hydra.cluster.SnappyPrms-appPropsForJobServer =
-           "queries=11-12-13-14-15-16-17,queryPlan=${queryPlan},spark.sql.shuffle.partitions=${shufflePartitions},spark.sql.inMemoryColumnarStorage.compressed=${inMemoryColumnarStorageCompressed},useIndex=${useIndex},resultCollection=${resultCollection},warmUpIterations=${warmUpIterations},actualRuns=${actualRuns}"
+           "queries=11-12-13-14-15-16-17,isDynamic=$isDynamic,resultCollection=$resultCollection,warmUpIterations=$warmUps,actualRuns=$actualRuns,threadNumber=1,traceEvents=$traceEvents,randomSeed=$randomSeed"
            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-cluster*tests.jar
            threadGroups = snappyStoreThreads
            maxThreads = 1
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index f01c9e0027..a2da6935c8 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -19,7 +19,7 @@ package io.snappydata.hydra
 
 import java.io.{File, FileNotFoundException, IOException, PrintWriter}
 import java.util
-import java.util.Collections
+import java.util.{Arrays, Collections}
 
 import scala.io.Source
 
@@ -254,7 +254,7 @@ object SnappyTestUtils {
       val expectedArray = expectedRow.split(",")
       var diff: Double = 0.0
       if(actualArray.length == expectedArray.length){
-        for (i <- 0 to actualArray.length) {
+        for (i <- 0 to actualArray.length - 1) {
           val value1: String = actualArray(i)
           val value2: String = expectedArray(i)
           if (!value1.equals(value2)) {
@@ -290,7 +290,8 @@ object SnappyTestUtils {
         assert(p.getInputStream.read == -1)
       }
       val rc = p.waitFor
-      if ((rc == 0) || (pb.command.contains("grep") && rc == 1)) {
+      val pbCmd = util.Arrays.toString(pb.command.toArray)
+      if ((rc == 0) || (pbCmd.contains("grep -v -F") && rc == 1)) {
         pw.println("Process executed successfully")
         0
       }
@@ -437,12 +438,13 @@ object SnappyTestUtils {
     }
     pw.println(aStr.toString)
 
-    if(missing.size() == unexpected.size()) {
+    if((missing.size() > 0) && missing.size() == unexpected.size()) {
       Collections.sort(missing)
       Collections.sort(unexpected)
       for (i <- 0 to missing.size()) {
         if (!isIgnorable(missing.get(i), unexpected.get(i))) true
       }
+      pw.println("This mismatch can be ignored.")
       aStr.setLength(0) // data mismatch can be ignored
     }
     if (aStr.length() > 0) {

From 05c3987718c2fd5ee39c21120af2f5397f474bce Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Tue, 13 Nov 2018 15:08:24 +0530
Subject: [PATCH 20/33] - Adding local conf for snapshotIsolation test.

---
 .../snapshotIsolation/local.snapshotIsolation.conf   | 12 ++++++++++++
 1 file changed, 12 insertions(+)
 create mode 100644 dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/local.snapshotIsolation.conf

diff --git a/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/local.snapshotIsolation.conf b/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/local.snapshotIsolation.conf
new file mode 100644
index 0000000000..f2be760208
--- /dev/null
+++ b/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/local.snapshotIsolation.conf
@@ -0,0 +1,12 @@
+hydra.Prms-removeDiskFilesAfterTest = true;
+hydra.Prms-extraDerbyServerVMArgs += " -Xmx4096m -Dderby.storage.pageCacheSize=32000 -Dderby.locks.waitTimeout=60 -Dderby.locks.deadlockTimeout=40 ";
+hydra.Prms-extraDerbyServerVMArgs += " -XX:MaxPermSize=128m ";
+
+io.snappydata.hydra.cluster.SnappyPrms-serverLauncherProps = " -heap-size=4g -memory-size=3g ";
+//io.snappydata.hydra.cluster.SnappyPrms-serverLauncherProps += " -J-Dgemfire.VersionVector.VERBOSE=true -J-Dgemfirexd.debug.true=TraceTran,TraceQuery ";
+io.snappydata.hydra.cluster.SnappyPrms-locatorLauncherProps = " ";
+
+io.snappydata.hydra.cluster.SnappyPrms-executorMemory = 4g;
+io.snappydata.hydra.cluster.SnappyPrms-leadMemory = 4g;
+
+//io.snappydata.hydra.cluster.SnappyPrms-logLevel = fine;

From d532c5b7ab388825728100e60aae8aee207bf838 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Thu, 15 Nov 2018 09:55:52 +0530
Subject: [PATCH 21/33] - Fix in the suspect string code.

---
 .../snappydata/hydra/cluster/SnappyTest.java   | 18 ++++--------------
 .../io/snappydata/hydra/SnappyTestUtils.scala  |  4 ++--
 2 files changed, 6 insertions(+), 16 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index 261c41f1fb..dd33928c11 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -2788,26 +2788,16 @@ public void checkSuspectStrings() {
     cmd.append("grep -r Exception " + getCurrentDirPath()).append(" | grep .log");
     String[] expectedExceptions = SnappyPrms.getExpectedExceptionList();
     if(cycleVms) {
-
       List<String> exceptions = Arrays.asList(expectedExceptions);
       exceptions.addAll(Arrays.asList(SnappyPrms.getExpectedExceptionListForHA()));
       expectedExceptions = (String[])exceptions.toArray();
     }
-    for (int i = 0; i < expectedExceptions.length; i++) {
-      cmd.append(" | grep -v ").append(expectedExceptions[i]);
-    }
+    for (int i = 0; i < expectedExceptions.length; i++)
+      cmd.append(" | grep -v \"").append(expectedExceptions[i] + "\"");
+    cmd.append("| grep -v .inc");
     Log.getLogWriter().info("grep command is : " + cmd);
     ProcessBuilder pb = new ProcessBuilder("/bin/bash", "-c", cmd.toString());
-    pb.redirectErrorStream(true);
-    pb.redirectOutput(ProcessBuilder.Redirect.appendTo(suspectStringFile));
-    try {
-      pr = pb.start();
-      pr.waitFor();
-    } catch (IOException ie) {
-
-    } catch (InterruptedException ie) {
-
-    }
+    executeProcess(pb,suspectStringFile);
   }
 
   /**
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index a2da6935c8..bb84e6b575 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -254,7 +254,7 @@ object SnappyTestUtils {
       val expectedArray = expectedRow.split(",")
       var diff: Double = 0.0
       if(actualArray.length == expectedArray.length){
-        for (i <- 0 to actualArray.length - 1) {
+        for (i <- 0 until actualArray.length) {
           val value1: String = actualArray(i)
           val value2: String = expectedArray(i)
           if (!value1.equals(value2)) {
@@ -441,7 +441,7 @@ object SnappyTestUtils {
     if((missing.size() > 0) && missing.size() == unexpected.size()) {
       Collections.sort(missing)
       Collections.sort(unexpected)
-      for (i <- 0 to missing.size()) {
+      for (i <- 0 until missing.size()) {
         if (!isIgnorable(missing.get(i), unexpected.get(i))) true
       }
       pw.println("This mismatch can be ignored.")

From 05d1d44424f15a78c39f6450aad3109f01d47dd9 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Fri, 16 Nov 2018 17:46:58 +0530
Subject: [PATCH 22/33] - Adding timestamp to the logs printed in the spark app
 or snappy job output files. - Some changes to the checkSuspectString code to
 grep exceptions.

---
 .../snappydata/hydra/cluster/SnappyPrms.java  |  17 ++-
 .../snappydata/hydra/cluster/SnappyTest.java  |  41 +++++--
 .../hydra/cluster/expectedExceptionList.inc   |   8 +-
 .../io/snappydata/hydra/SnappyTestUtils.scala | 102 ++++++++++--------
 .../hydra/ct/CreateAndLoadCTTablesApp.scala   |  14 ++-
 .../hydra/ct/CreateAndLoadCTTablesJob.scala   |  16 +--
 .../hydra/ct/ValidateCTQueriesApp.scala       |  13 ++-
 .../hydra/ct/ValidateCTQueriesJob.scala       |  13 ++-
 .../northwind/CreateAndLoadNWTablesJob.scala  |  20 ++--
 .../CreateAndLoadNWTablesSparkApp.scala       |  16 +--
 .../hydra/northwind/NWTestUtil.scala          |  18 ++--
 .../northwind/ValidateNWQueriesApp.scala      |  13 ++-
 .../northwind/ValidateNWQueriesJob.scala      |  12 ++-
 13 files changed, 188 insertions(+), 115 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
index 997c5f04b5..022385531f 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
@@ -16,7 +16,6 @@
  */
 package io.snappydata.hydra.cluster;
 
-import java.util.List;
 import java.util.Vector;
 
 import hydra.BasePrms;
@@ -654,6 +653,22 @@ public static String[] getExpectedExceptionListForHA() {
     return strArr;
   }
 
+  /**
+   * Parameter used to get the List of expected exceptions in the test run in case of HA.
+   * Array of values for expected exceptions
+   */
+  public static Long knownExceptionsForHA;
+
+  public static String[] getKnownExceptionListForHA() {
+    Long key = knownExceptionsForHA;
+    Vector statements = tab().vecAt(key, new HydraVector());
+    String[] strArr = new String[statements.size()];
+    for (int i = 0; i < statements.size(); i++) {
+      strArr[i] = (String)statements.elementAt(i);
+    }
+    return strArr;
+  }
+
   /**
    * (int)Sleep time in secs between 2 thread dumps.
    */
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index dd33928c11..9dc70d9973 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -741,7 +741,6 @@ private static synchronized List<String> getEndpoints(String type) {
     return endpoints;
   }
 
-
   /**
    * Returns PIDs for all the processes started in the test, e.g. locator, server, lead .
    */
@@ -758,7 +757,6 @@ private static synchronized Set<String> getPidList() {
     return pidList;
   }
 
-
   /**
    * Returns hostname of the process
    */
@@ -774,7 +772,6 @@ protected static synchronized String getPidHost(String pid) {
     return pidHost;
   }
 
-
   /**
    * Returns primary lead port .
    */
@@ -791,7 +788,6 @@ private static synchronized String getPrimaryLeadPort(String clientName) {
     return port;
   }
 
-
   protected void initHydraThreadLocals() {
     this.connection = getConnection();
   }
@@ -2776,16 +2772,17 @@ public void dumpStacks() {
   /*
   * Check if there is any suspect strings in the test. To be executed at the end of test.
   */
+
   public static synchronized void HydraTask_checkSuspectStrings() {
     snappyTest.checkSuspectStrings();
   }
 
   public void checkSuspectStrings() {
-    Process pr = null;
     String checkSuspectOutPut = getCurrentDirPath() + File.separator + "suspectStrings.txt";
     File suspectStringFile = new File(checkSuspectOutPut);
     StringBuilder cmd = new StringBuilder();
-    cmd.append("grep -r Exception " + getCurrentDirPath()).append(" | grep .log");
+    StringBuilder exceptedExcep = new StringBuilder();
+
     String[] expectedExceptions = SnappyPrms.getExpectedExceptionList();
     if(cycleVms) {
       List<String> exceptions = Arrays.asList(expectedExceptions);
@@ -2793,13 +2790,39 @@ public void checkSuspectStrings() {
       expectedExceptions = (String[])exceptions.toArray();
     }
     for (int i = 0; i < expectedExceptions.length; i++)
-      cmd.append(" | grep -v \"").append(expectedExceptions[i] + "\"");
-    cmd.append("| grep -v .inc");
+      exceptedExcep.append(" | grep -v \"").append(expectedExceptions[i] + "\"");
+
+    cmd.setLength(0);
+    cmd.append("find " + getCurrentDirPath() + " -type f \\( -name \"*.log\" -not -iname " +
+        "\"*aster*.log\" -or -name \"*.out\" \\) ");
+    cmd.append(" | xargs grep Exception ");
+    cmd.append(exceptedExcep.toString());
+    cmd. append(" | grep -v \\.java:");
     Log.getLogWriter().info("grep command is : " + cmd);
     ProcessBuilder pb = new ProcessBuilder("/bin/bash", "-c", cmd.toString());
-    executeProcess(pb,suspectStringFile);
+    executeProcess(pb, suspectStringFile);
+
+    if(suspectStringFile.length() != 0){
+      try {
+        StringBuilder exceptionList = new StringBuilder();
+        if(cycleVms){
+
+        }
+        BufferedReader reader = new BufferedReader(new FileReader(suspectStringFile));
+        String line = "";
+        while((line = reader.readLine()) != null)
+          exceptionList.append(line).append("\n");
+        throw new TestException("Unknown Exceptions observed in the run " + exceptionList
+            .toString());
+      } catch(FileNotFoundException fe) {
+
+      } catch(IOException ie) {
+
+      }
+    }
   }
 
+
   /**
    * Create and start snappy server.
    */
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc b/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc
index 74b4d8de60..416e0e5034 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc
@@ -18,9 +18,15 @@ io.snappydata.hydra.cluster.SnappyPrms-expectedExceptions =
 
 io.snappydata.hydra.cluster.SnappyPrms-expectedExceptionsForHA =
 "java.lang.reflect.InvocationTargetException"
-" java.lang.IllegalStateException: Cannot call methods on a stopped SparkContext"
+"java.lang.IllegalStateException: Cannot call methods on a stopped SparkContext"
 ;
 
+io.snappydata.hydra.cluster.SnappyPrms-knownExceptionsForHA =
+"java.lang.reflect.InvocationTargetException"
+"java.lang.IllegalStateException: Cannot call methods on a stopped SparkContext"
+;
+
+
 /*
 io.snappydata.hydra.cluster.SnappyPrms-suspectStrings =
 "java.lang.IllegalStateException"
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index bb84e6b575..897e506c69 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -18,8 +18,9 @@
 package io.snappydata.hydra
 
 import java.io.{File, FileNotFoundException, IOException, PrintWriter}
+import java.sql.Timestamp
 import java.util
-import java.util.{Arrays, Collections}
+import java.util.Collections
 
 import scala.io.Source
 
@@ -32,6 +33,10 @@ object SnappyTestUtils {
   var numRowsValidation: Boolean = false;
   var tableType: String = null
 
+  def logTime: String = {
+    "[" + new Timestamp(System.currentTimeMillis()).toString + "] "
+  }
+
   /*
   Executes the join query, matches only the full result with expected result, returns false if the
   query validation has failed.
@@ -93,34 +98,35 @@ object SnappyTestUtils {
     assertQuery(snc, sqlString, 0, queryNum, pw, sqlContext, true)
   }
 
-    def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
-        pw: PrintWriter, sqlContext: SQLContext): Boolean = {
-      numRowsValidation = true
-      assertQuery(snc, sqlString, numRows, queryNum, pw, sqlContext, true)
-    }
+  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
+      pw: PrintWriter, sqlContext: SQLContext): Boolean = {
+    numRowsValidation = true
+    assertQuery(snc, sqlString, numRows, queryNum, pw, sqlContext, true)
+  }
 
-    /*
-   Executes the query, matches the result with expected result, returns false if the query
-   validation has failed.
-   */
+  /*
+ Executes the query, matches the result with expected result, returns false if the query
+ validation has failed.
+ */
   def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
       pw: PrintWriter, sqlContext: SQLContext, usePlanCaching: Boolean): Boolean = {
     var validationFailed = false
     var snappyDF: DataFrame = null
-    if(!usePlanCaching) {
+    if (!usePlanCaching) {
       snappyDF = snc.sqlUncached(sqlString)
     } else {
       snappyDF = snc.sql(sqlString)
     }
     val count = snappyDF.count
     // scalastyle:off println
+    pw.println(s"\n${logTime} Executing Query $queryNum ...")
     println(s"Query $queryNum")
     snappyDF.explain(true)
     if (numRowsValidation) {
-      pw.println(s"Query ${queryNum} returned ${count} rows for ${tableType} table")
+      pw.println(s"${logTime} Query ${queryNum} returned ${count} rows for ${tableType} table")
       if (count != numRows) {
-        pw.println(s"Result mismatch for query ${queryNum} : found ${count} rows but expected " +
-            s" ${numRows} rows.")
+        pw.println(s"${logTime} Result mismatch for query ${queryNum}: found ${count} rows but " +
+            s"expected ${numRows} rows.")
         validationFailed = true
       }
       pw.flush()
@@ -128,14 +134,14 @@ object SnappyTestUtils {
     var fullRSValidationFailed: Boolean = false
     if (validateFullResultSet) {
       val snappyQueryFileName = s"Snappy_${queryNum}"
-      val snappyDest: String = getQueryResultDir("snappyQueryFiles") +
+      val snappyDest: String = getQueryResultDir("snappyResults") +
           File.separator + snappyQueryFileName
       // scalastyle:off println
       // pw.println(s"Snappy query results are at : ${snappyDest}")
       val snappyFile: File = new java.io.File(snappyDest)
 
       val sparkQueryFileName = s"Spark_${queryNum}"
-      val sparkDest: String = getQueryResultDir("sparkQueryFiles") + File.separator +
+      val sparkDest: String = getQueryResultDir("sparkResults") + File.separator +
           sparkQueryFileName
       // pw.println(s"Spark query results are at : ${sparkDest}")
       val sparkFile: File = new java.io.File(sparkDest)
@@ -148,7 +154,7 @@ object SnappyTestUtils {
           snappyDF = snappyDF.repartition(1) // .sortWithinPartitions(snap_col1, snap_col: _*)
           writeToFile(snappyDF, snappyDest, snc)
           // writeResultSetToCsv(snappyDF, snappyFile)
-          pw.println(s"${queryNum} Result Collected in file ${snappyDest}")
+          pw.println(s"${logTime} Snappy result collected in : ${snappyDest}")
         }
         if (!sparkFile.exists()) {
           // val col1 = sparkDF.schema.fieldNames(0)
@@ -156,26 +162,30 @@ object SnappyTestUtils {
           sparkDF = sparkDF.repartition(1) // .sortWithinPartitions(col1, col: _*)
           writeToFile(sparkDF, sparkDest, snc)
           // writeResultSetToCsv(sparkDF, sparkFile)
-          pw.println(s"${queryNum} Result Collected in file ${sparkDest}")
+          pw.println(s"${logTime} Spark result collected in : ${sparkDest}")
         }
         fullRSValidationFailed = compareFiles(snappyFile, sparkFile, pw, queryNum,
-           fullRSValidationFailed)
+          fullRSValidationFailed)
       } catch {
         case ex: Exception => {
           fullRSValidationFailed = true
-          pw.println(s"Full resultSet validation for ${queryNum} got the following exception:\n")
+          pw.println(s"${logTime} Full resultSet validation for ${queryNum} got the following " +
+              s"exception:\n")
           ex.printStackTrace(pw)
         }
       }
       pw.flush()
     }
     if (validationFailed) {
-      pw.println(s"\nNumRows validation failed for query ${queryNum} on ${tableType} table.")
+      pw.println(s"\n${logTime} NumRows validation failed for query ${queryNum} on ${tableType} " +
+          s"table.")
     }
-    if(fullRSValidationFailed){
-      pw.println(s"\nFull resultset validation failed for query ${queryNum} on ${tableType} table.")
+    if (fullRSValidationFailed) {
+      pw.println(s"\n${logTime} Full resultset validation failed for query ${queryNum} on " +
+          s"${tableType} table.")
       validationFailed = true
     }
+    pw.println(s"${logTime}  Execution completed for query ${queryNum}")
     pw.flush()
     return validationFailed
   }
@@ -253,7 +263,7 @@ object SnappyTestUtils {
       val actualArray = actualRow.split(",")
       val expectedArray = expectedRow.split(",")
       var diff: Double = 0.0
-      if(actualArray.length == expectedArray.length){
+      if (actualArray.length == expectedArray.length) {
         for (i <- 0 until actualArray.length) {
           val value1: String = actualArray(i)
           val value2: String = expectedArray(i)
@@ -292,20 +302,20 @@ object SnappyTestUtils {
       val rc = p.waitFor
       val pbCmd = util.Arrays.toString(pb.command.toArray)
       if ((rc == 0) || (pbCmd.contains("grep -v -F") && rc == 1)) {
-        pw.println("Process executed successfully")
+        println("Process executed successfully")
         0
       }
       else {
-        pw.println("Process execution failed with exit code: " + rc)
+        println("Process execution failed with exit code: " + rc)
         1
       }
     } catch {
       case e: IOException =>
-        pw.println("Exception occurred while starting the process:" + pb + "\nError Message:" + e
+        println("Exception occurred while starting the process:" + pb + "\nError Message:" + e
             .getMessage)
         1
       case e: InterruptedException =>
-        pw.println("Exception occurred while waiting for the process execution:" + p + "\nError " +
+        println("Exception occurred while waiting for the process execution:" + p + "\nError " +
             "Message:" + e.getMessage)
         1
     }
@@ -376,19 +386,19 @@ object SnappyTestUtils {
       writer.close()
     } catch {
       case fe: FileNotFoundException =>
-        pw.println("Log exception while overwirting the result mismatch files", fe)
-        false
+        pw.println(s"${logTime} Exception while overwriting the result mismatch files", fe)
+        return true
     }
     val unexpectedResultsFile = new File(unexpectedFileName)
     val missingResultsFile = new File(missingFileName)
     command = "grep -v -F -x -f " + sparkResultsFile + " " + snappyResultsFile
     pb = new ProcessBuilder("/bin/bash", "-c", command)
-    pw.println("Executing command : " + command)
+    println("Executing command : " + command)
     // get the unexpected rows in snappy
     executeProcess(pb, unexpectedResultsFile, pw)
     command = "grep -v -F -x -f " + snappyResultsFile + " " + sparkResultsFile
     pb = new ProcessBuilder("/bin/bash", "-c", command)
-    pw.println("Executing command : " + command)
+    println("Executing command : " + command)
     // get the missing rows in snappy
     executeProcess(pb, missingResultsFile, pw)
     var unexpectedRsReader: Iterator[String] = null
@@ -398,7 +408,7 @@ object SnappyTestUtils {
       missingRsReader = Source.fromFile(missingResultsFile).getLines()
     } catch {
       case fe: FileNotFoundException =>
-        pw.println("Could not find file to compare results.", fe)
+        pw.println(s"${logTime} Could not find file to compare results.", fe)
         return true
     }
     val unexpected = new util.ArrayList[String]
@@ -410,7 +420,8 @@ object SnappyTestUtils {
         missing.add("\n  " + missingRsReader.next())
     } catch {
       case ie: IOException =>
-        pw.println("Got exception while reading resultset files", ie)
+        pw.println(s"${logTime} Got exception while reading resultset" +
+            s" files", ie)
     }
 
     if (missing.size > 0) {
@@ -436,18 +447,20 @@ object SnappyTestUtils {
       }
       aStr.append("\n")
     }
-    pw.println(aStr.toString)
 
-    if((missing.size() > 0) && missing.size() == unexpected.size()) {
+    // check if the mismatch is due to decimal, and can be ignored
+    if ((missing.size() > 0) && missing.size() == unexpected.size()) {
       Collections.sort(missing)
       Collections.sort(unexpected)
       for (i <- 0 until missing.size()) {
         if (!isIgnorable(missing.get(i), unexpected.get(i))) true
       }
-      pw.println("This mismatch can be ignored.")
+      // pw.println("This mismatch can be ignored.")
       aStr.setLength(0) // data mismatch can be ignored
     }
+
     if (aStr.length() > 0) {
+      pw.println(s"${logTime} ${aStr.toString}")
       true
     } else {
       false
@@ -480,11 +493,11 @@ object SnappyTestUtils {
     val snappyQueryFileName = s"Snappy_${queryNum}"
     val snappyDest: String = getQueryResultDir("snappyQueryFiles") +
         File.separator + snappyQueryFileName
-    pw.println(snappyDest)
+    pw.println(s"${logTime} ${snappyDest}")
     val snappyFile: File = new java.io.File(snappyDest)
     var snappyDF = snc.sql(sqlString)
 
-    pw.println(goldenFileDest)
+    pw.println(s"${logTime} ${goldenFileDest}")
     val goldenFileName = goldenFileDest + File.separator + s"Spark_$queryNum"
     val sortedGoldenDest = goldenFileDest + File.separator + s"Sorted_$queryNum"
     val sortedGoldenFile: File = new java.io.File(sortedGoldenDest)
@@ -497,10 +510,10 @@ object SnappyTestUtils {
         snappyDF = snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
         writeToFile(snappyDF, snappyDest, snc)
         // writeResultSetToCsv(snappyDF, snappyFile)
-        pw.println(s"${queryNum} Result Collected in file $snappyDest")
+        pw.println(s"${logTime} ${queryNum} Result Collected in file $snappyDest")
       }
       if (!goldenFile.exists()) {
-        pw.println(s"Did not find any golden file for query $queryNum")
+        pw.println(s"${logTime} Did not find any golden file for query $queryNum")
         throw new Exception(s"Did not find any golden file for query $queryNum")
       } else if (goldenFile.length() > 0) {
         // sort the contents of golden file before comparing results
@@ -512,9 +525,9 @@ object SnappyTestUtils {
         goldenDF = goldenDF.repartition(1).sortWithinPartitions(col1, col: _*)
         writeToFile(goldenDF, sortedGoldenDest, snc)
         // writeResultSetToCsv(goldenDF, sortedGoldenFile)
-        pw.println(s"${queryNum} Result Collected in file ${sortedGoldenDest}")
+        pw.println(s"${logTime} ${queryNum} Result Collected in file ${sortedGoldenDest}")
       } else {
-        pw.println(s"zero results in query $queryNum.")
+        pw.println(s"${logTime} No results in query result file for $queryNum.")
       }
       hasValidationFailed = compareFiles(snappyFile, sortedGoldenFile, pw, queryNum,
         hasValidationFailed)
@@ -522,7 +535,8 @@ object SnappyTestUtils {
     } catch {
       case ex: Exception => {
         hasValidationFailed = true
-        pw.println(s"Full resultSet Validation failed for ${queryNum} with following exception:\n")
+        pw.println(s"${logTime} Full resultSet Validation failed for ${queryNum} with following " +
+            s"exception:\n")
         ex.printStackTrace(pw)
       }
     }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesApp.scala
index 14bb8fee2c..971a510888 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesApp.scala
@@ -19,8 +19,10 @@ package io.snappydata.hydra.ct
 
 import java.io.{File, FileOutputStream, PrintWriter}
 
+import io.snappydata.hydra.SnappyTestUtils
+
 import org.apache.spark.sql.SnappyContext
-import org.apache.spark.{SparkContext, SparkConf}
+import org.apache.spark.{SparkConf, SparkContext}
 
 object CreateAndLoadCTTablesApp {
 
@@ -37,9 +39,10 @@ object CreateAndLoadCTTablesApp {
     val persistenceMode = args(3)
     val pw = new PrintWriter(new FileOutputStream(new File("CreateAndLoadCTTablesApp.out"), true))
     // scalastyle:off println
-    pw.println(s"dataFilesLocation : ${dataFilesLocation}")
+    pw.println(s"${SnappyTestUtils.logTime} dataFilesLocation : ${dataFilesLocation}")
     CTTestUtil.dropTables(snc)
-    pw.println(s"Create and load for ${tableType} tables has started")
+    pw.println(s"${SnappyTestUtils.logTime} Create and load for ${tableType} tables " +
+        s"has started")
     pw.flush()
     tableType match {
       // replicated row tables
@@ -71,12 +74,13 @@ object CreateAndLoadCTTablesApp {
         throw new Exception(s"Did not find any match for ${tableType} to create tables." +
             s" See ${CTTestUtil.getCurrentDirectory}/CreateAndLoadCTTablesApp.out")
     }
-    pw.println("Tables are created. Now loading data.")
+    pw.println(s"${SnappyTestUtils.logTime} Tables are created. Now loading data.")
     pw.flush()
     CTTestUtil.loadTables(snc)
     println(s"Create and load for ${tableType} tables has completed successfully. " +
         s"See ${CTTestUtil.getCurrentDirectory}/CreateAndLoadCTTablesApp.out")
-    pw.println(s"Create and load for ${tableType} tables has completed successfully")
+    pw.println(s"${SnappyTestUtils.logTime} Create and load for ${tableType} tables " +
+        s"has completed successfully")
     pw.close()
   }
 }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesJob.scala
index a5db1a513d..994315fb6e 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/CreateAndLoadCTTablesJob.scala
@@ -18,11 +18,14 @@
 package io.snappydata.hydra.ct
 
 import java.io.{File, FileOutputStream, PrintWriter}
+
 import scala.util.{Failure, Success, Try}
+
 import com.typesafe.config.Config
+import io.snappydata.hydra.SnappyTestUtils
 import util.TestException
 
-import org.apache.spark.sql.{SnappySession, SnappyJobValid, SnappyJobValidation, SnappySQLJob}
+import org.apache.spark.sql.{SnappyJobValid, SnappyJobValidation, SnappySQLJob, SnappySession}
 
 class CreateAndLoadCTTablesJob extends SnappySQLJob {
 
@@ -30,17 +33,17 @@ class CreateAndLoadCTTablesJob extends SnappySQLJob {
     val pw = new PrintWriter(new FileOutputStream(new File("CreateAndLoadCTTablesJob.out"), true));
     val tableType = jobConfig.getString("tableType")
     // scalastyle:off println
-    pw.println("In create and load tables Job")
     Try {
       val snc = snSession.sqlContext
       snc.sql("set spark.sql.shuffle.partitions=6")
       val dataFilesLocation = jobConfig.getString("dataFilesLocation")
       val redundancy = jobConfig.getString("redundancy")
-      pw.println(s"Data files are at : ${dataFilesLocation}")
+      pw.println(s"${SnappyTestUtils.logTime} dataFilesLocation : ${dataFilesLocation}")
       snc.setConf("dataFilesLocation", dataFilesLocation)
       CTQueries.snc = snc
       CTTestUtil.dropTables(snc)
-      pw.println(s"Create and load for ${tableType} tables has started...")
+      pw.println(s"${SnappyTestUtils.logTime} Create and load for ${tableType} tables" +
+          s" has started...")
       pw.flush()
       tableType match {
         // replicated row tables
@@ -73,12 +76,13 @@ class CreateAndLoadCTTablesJob extends SnappySQLJob {
           throw new TestException(s"Did not find any match for ${tableType} to create tables." +
               s" See ${CTTestUtil.getCurrentDirectory}/CreateAndLoadCTTablesJob.out")
       }
-      pw.println("Tables are created. Now loading data.")
+      pw.println(s"${SnappyTestUtils.logTime} Tables are created. Now loading data.")
       pw.flush()
       CTTestUtil.loadTables(snc);
       println(s"Create and load for ${tableType} tables has completed successfully. " +
           s"See ${CTTestUtil.getCurrentDirectory}/CreateAndLoadCTTablesJob.out")
-      pw.println(s"Create and load for ${tableType} tables has completed successfully")
+      pw.println(s"${SnappyTestUtils.logTime} Create and load for ${tableType} tables" +
+          s" has completed successfully")
       pw.close()
     } match {
       case Success(v) => pw.close()
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
index 46997cb155..21a28e18e8 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
@@ -42,31 +42,34 @@ object ValidateCTQueriesApp {
     val dataFilesLocation = args(0)
     snc.setConf("dataFilesLocation", dataFilesLocation)
     // scalastyle:off println
-    pw.println(s"dataFilesLocation : ${dataFilesLocation}")
     CTQueries.snc = snc
     val tableType = args(1)
     val fullResultSetValidation: Boolean = args(2).toBoolean
     val numRowsValidation: Boolean = args(3).toBoolean
     SnappyTestUtils.numRowsValidation = numRowsValidation
     SnappyTestUtils.validateFullResultSet = fullResultSetValidation
-    pw.println(s"Validation for queries with ${tableType} tables started")
+    pw.println(s"${SnappyTestUtils.logTime} Validation for queries with ${tableType} " +
+        s"tables started")
 
     val startTime = System.currentTimeMillis
     val failedQueries = CTTestUtil.executeQueries(snc, tableType, pw, sqlContext)
     val endTime = System.currentTimeMillis
     val totalTime = (endTime - startTime) / 1000
-    pw.println(s"Total time for execution is :: ${totalTime} seconds.")
+    pw.println(s"${SnappyTestUtils.logTime} Total time for execution is :" +
+        s" ${totalTime} seconds.")
     if (!failedQueries.isEmpty) {
       println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. See " +
           s"${getCurrentDirectory}/${outputFile}")
-      pw.println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. ")
+      pw.println(s"${SnappyTestUtils.logTime} Validation failed for ${tableType} " +
+          s"tables for queries ${failedQueries}. ")
       pw.close()
       throw new Exception(s"Validation task failed for ${tableType}. See " +
           s"${getCurrentDirectory}/${outputFile}")
     }
     println(s"Validation for queries with ${tableType} tables completed successfully. See " +
         s"${getCurrentDirectory}/${outputFile}")
-    pw.println(s"Validation for queries with ${tableType} tables completed successfully.")
+    pw.println(s"${SnappyTestUtils.logTime} Validation for queries with ${tableType} " +
+        s"tables completed successfully.")
     pw.close()
   }
 }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
index c0c5f91506..3cff0c39ec 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
@@ -45,7 +45,8 @@ class ValidateCTQueriesJob extends SnappySQLJob {
       snc.setConf("dataFilesLocation", dataFilesLocation)
       CTQueries.snc = snc
       // scalastyle:off println
-      pw.println(s"Validation for $tableType tables started in snappy Job")
+      pw.println(s"${SnappyTestUtils.logTime} Validation for $tableType tables " +
+          s"started in snappy Job")
       val numRowsValidation: Boolean = jobConfig.getBoolean("numRowsValidation")
       val fullResultSetValidation: Boolean = jobConfig.getBoolean("fullResultSetValidation")
       SnappyTestUtils.validateFullResultSet = fullResultSetValidation
@@ -56,19 +57,21 @@ class ValidateCTQueriesJob extends SnappySQLJob {
       val failedQueries = CTTestUtil.executeQueries(snc, tableType, pw, sqlContext)
       val endTime = System.currentTimeMillis
       val totalTime = (endTime - startTime) / 1000
+      pw.println(s"${SnappyTestUtils.logTime} Total execution took ${totalTime} " +
+          s"seconds.")
       if(!failedQueries.isEmpty) {
         println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. " +
             s"See ${getCurrentDirectory}/${outputFile}")
-        pw.println(s"Total execution took ${totalTime} seconds.")
-        pw.println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. ")
+        pw.println(s"${SnappyTestUtils.logTime} Validation failed for ${tableType} " +
+            s"tables for queries ${failedQueries}. ")
         pw.close()
         throw new TestException(s"Validation task failed for ${tableType}. " +
             s"See ${getCurrentDirectory}/${outputFile}")
       }
       println(s"Validation for $tableType tables completed sucessfully. " +
           s"See ${getCurrentDirectory}/${outputFile}")
-      pw.println(s"ValidateQueries for ${tableType} tables completed successfully in " +
-          totalTime + " seconds ")
+      pw.println(s"${SnappyTestUtils.logTime} ValidateQueries for ${tableType} tables" +
+          s" completed successfully.")
       pw.close()
     } match {
       case Success(v) => pw.close()
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/CreateAndLoadNWTablesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/CreateAndLoadNWTablesJob.scala
index 36e5fbecee..2392affbdf 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/CreateAndLoadNWTablesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/CreateAndLoadNWTablesJob.scala
@@ -19,9 +19,9 @@ package io.snappydata.hydra.northwind
 import java.io.{File, FileOutputStream, PrintWriter}
 
 import com.typesafe.config.Config
-import io.snappydata.hydra.northwind
-import org.apache.spark.sql._
+import io.snappydata.hydra.{SnappyTestUtils, northwind}
 
+import org.apache.spark.sql._
 import scala.util.{Failure, Success, Try}
 
 class CreateAndLoadNWTablesJob extends SnappySQLJob {
@@ -33,18 +33,15 @@ class CreateAndLoadNWTablesJob extends SnappySQLJob {
       // scalastyle:off println
       println("jobConfig.entrySet().size() : " + jobConfig.entrySet().size())
       val dataFilesLocation = jobConfig.getString("dataFilesLocation")
-      pw.println(s"dataFilesLocation is : ${dataFilesLocation}")
-      println(s"dataFilesLocation is : ${dataFilesLocation}")
+      pw.println(s"${SnappyTestUtils.logTime} dataFilesLocation : ${dataFilesLocation}")
       val tableType = jobConfig.getString("tableType")
-      pw.println(s"tableType : " + tableType)
-      println(s"tableType : " + tableType)
       snc.setConf("dataFilesLocation", dataFilesLocation)
       val createLargeOrderTable = jobConfig.getString("createLargeOrderTable").toBoolean
       northwind.NWQueries.snc = snc
       NWQueries.dataFilesLocation = dataFilesLocation
       NWTestUtil.dropTables(snc)
-      pw.println(s"Create and load ${tableType} tables Test started at : " + System
-          .currentTimeMillis)
+      pw.println(s"${SnappyTestUtils.logTime} Create and load ${tableType} tables " +
+          s"Test has started")
       tableType match {
         case "ReplicatedRow" => NWTestUtil.createAndLoadReplicatedTables(snc)
         case "PartitionedRow" =>
@@ -53,14 +50,13 @@ class CreateAndLoadNWTablesJob extends SnappySQLJob {
         case "Colocated" => NWTestUtil.createAndLoadColocatedTables(snc)
         case _ => // the default, catch-all
       }
-      pw.println(s"Create and load ${tableType} tables Test completed successfully at : " +
-          System.currentTimeMillis)
+      pw.println(s"${SnappyTestUtils.logTime} Create and load ${tableType} tables " +
+          s"Test completed successfully")
       pw.flush()
       if (createLargeOrderTable) {
         NWTestUtil.ingestMoreData(snc, 10)
       }
-      pw.println(s"Loaded more data successfully at : " +
-          System.currentTimeMillis)
+      pw.println(s"${SnappyTestUtils.logTime} Loaded more data successfully")
       pw.close()
     } match {
       case Success(v) => pw.close()
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/CreateAndLoadNWTablesSparkApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/CreateAndLoadNWTablesSparkApp.scala
index 1c688b8359..18a76ca59d 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/CreateAndLoadNWTablesSparkApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/CreateAndLoadNWTablesSparkApp.scala
@@ -18,8 +18,10 @@ package io.snappydata.hydra.northwind
 
 import java.io.{File, FileOutputStream, PrintWriter}
 
+import io.snappydata.hydra.SnappyTestUtils
+
 import org.apache.spark.sql.SnappyContext
-import org.apache.spark.{SparkContext, SparkConf}
+import org.apache.spark.{SparkConf, SparkContext}
 
 object CreateAndLoadNWTablesSparkApp {
 
@@ -38,9 +40,10 @@ object CreateAndLoadNWTablesSparkApp {
     // scalastyle:off println
     val pw = new PrintWriter(new FileOutputStream(new File("CreateAndLoadNWTablesSparkApp.out"),
       true));
-    pw.println(s"dataFilesLocation : ${dataFilesLocation}")
+    pw.println(s"${SnappyTestUtils.logTime} dataFilesLocation : ${dataFilesLocation}")
     NWTestUtil.dropTables(snc)
-    pw.println(s"Create and load ${tableType} tables Test started at : " + System.currentTimeMillis)
+    pw.println(s"${SnappyTestUtils.logTime} Create and load ${tableType} tables Test " +
+        s"started")
     tableType match {
       case "ReplicatedRow" => NWTestUtil.createAndLoadReplicatedTables(snc)
       case "PartitionedRow" => NWTestUtil.createAndLoadPartitionedTables(snc, createLargeOrderTable)
@@ -48,14 +51,13 @@ object CreateAndLoadNWTablesSparkApp {
       case "Colocated" => NWTestUtil.createAndLoadColocatedTables(snc)
       case _ => // the default, catch-all
     }
-    pw.println(s"Create and load ${tableType} tables Test completed successfully at : " + System
-        .currentTimeMillis)
+    pw.println(s"${SnappyTestUtils.logTime} Create and load ${tableType} tables Test " +
+        s"completed successfully.")
     pw.flush()
     if (createLargeOrderTable) {
       NWTestUtil.ingestMoreData(snc, 10)
     }
-    pw.println(s"Loaded more data successfully at : " + System
-        .currentTimeMillis)
+    pw.println(s"${SnappyTestUtils.logTime} Loaded more data successfully.")
     pw.close()
   }
 }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
index 18f4caeb75..e8f0caffaf 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
@@ -75,12 +75,12 @@ object NWTestUtil {
     var failedQueries = ""
     if (SnappyTestUtils.validateFullResultSet) {
       // scalastyle:off println
-      pw.println(s"createAndLoadSparkTables started ...")
+      pw.println(s"${SnappyTestUtils.logTime} CreateAndLoadSparkTables started ...")
       val startTime = System.currentTimeMillis
       NWTestUtil.createAndLoadSparkTables(sqlContext)
       val finishTime = System.currentTimeMillis()
-      pw.println(s"createAndLoadSparkTables completed successfully in : " + ((finishTime -
-          startTime) / 1000) + " seconds")
+      pw.println(s"${SnappyTestUtils.logTime} CreateAndLoadSparkTables completed successfully in " +
+          s": " + ((finishTime - startTime) / 1000) + " seconds")
     }
     for (q <- NWQueries.queries) {
       var queryExecuted = false;
@@ -335,19 +335,17 @@ object NWTestUtil {
         case "Q56" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56, 8, "Q56",
           pw, sqlContext, usePlanCaching)
           if (executeQueriesByChangingConstants) {
-            hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56, 8, "Q56_1", pw,
+            hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56_1, 8, "Q56_1", pw,
               sqlContext, usePlanCaching)
-            hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56, 8, "Q56_2", pw,
+            hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56_2, 8, "Q56_2", pw,
               sqlContext, usePlanCaching)
-            hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56, 8, "Q56_3", pw,
+            hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56_3, 8, "Q56_3", pw,
               sqlContext, usePlanCaching)
           }; queryExecuted = true
         case _ => // do nothing
       }
-      if (queryExecuted) {
-        pw.println(s"Execution completed for query ${q._1}")
-      } else {
-        pw.println(s"Did not execute ${q._1}.")
+      if (!queryExecuted) {
+        pw.println(s"${SnappyTestUtils.logTime} Did not execute ${q._1}.")
       }
       if (hasValidationFailed) {
         failedQueries = SnappyTestUtils.addToFailedQueryList(failedQueries, q._1)
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
index b35f07d5f3..2918f81ba4 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
@@ -53,7 +53,8 @@ object ValidateNWQueriesApp {
     var failedQueries = ""
     // scalastyle:off println
     val startTime = System.currentTimeMillis()
-    pw.println(s"ValidateQueries for ${tableType} tables started ..")
+    pw.println(s"${SnappyTestUtils.logTime} ValidateQueries for ${tableType} tables " +
+        s"started ..")
     if (isSmokeRun) {
       NWTestUtil.validateSelectiveQueriesFullResultSet(snc, tableType, pw, sqlContext)
     }
@@ -65,14 +66,16 @@ object ValidateNWQueriesApp {
     if (!failedQueries.isEmpty) {
       println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. See " +
           s"${getCurrentDirectory}/${outputFile}")
-      pw.println(s"Total execution took ${totalTime} seconds.")
-      pw.println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. ")
+      pw.println(s"${SnappyTestUtils.logTime} Total execution took ${totalTime} " +
+          s"seconds.")
+      pw.println(s"${SnappyTestUtils.logTime} Validation failed for ${tableType} " +
+          s"tables for queries ${failedQueries}. ")
       pw.close()
       throw new Exception(s"Validation task failed for ${tableType} tables. See " +
           s"${getCurrentDirectory}/${outputFile}")
     }
-    pw.println(s"ValidateQueries for ${tableType} tables completed successfully in ${totalTime} " +
-        s"seconds ")
+    pw.println(s"${SnappyTestUtils.logTime} ValidateQueries for ${tableType} tables " +
+        s"completed successfully in ${totalTime} seconds ")
     pw.close()
   }
 }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
index c90a029256..d9f34a0b08 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
@@ -52,7 +52,8 @@ class ValidateNWQueriesJob extends SnappySQLJob {
       NWQueries.dataFilesLocation = dataFilesLocation
       // scalastyle:off println
       val startTime = System.currentTimeMillis()
-      pw.println(s"ValidateQueries for ${tableType} tables started ..")
+      pw.println(s"${SnappyTestUtils.logTime} ValidateQueries for ${tableType} tables" +
+          s" started ..")
       if (isSmokeRun) {
         failedQueries = NWTestUtil.validateSelectiveQueriesFullResultSet(snc, tableType, pw,
           sqlContext)
@@ -62,17 +63,18 @@ class ValidateNWQueriesJob extends SnappySQLJob {
       }
       val finishTime = System.currentTimeMillis()
       val totalTime = (finishTime -startTime)/1000
+      pw.println(s"${SnappyTestUtils.logTime} Total execution took ${totalTime} " +
+          s"seconds.")
       if (!failedQueries.isEmpty) {
         println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. " +
             s"See ${getCurrentDirectory}/${outputFile}")
-        pw.println(s"Total execution took ${totalTime} seconds.")
-        pw.println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. ")
+        pw.println(s"${SnappyTestUtils.logTime} Validation failed for ${tableType} " +
+            s"tables for queries ${failedQueries}. ")
         pw.close()
         throw new TestException(s"Validation task failed for ${tableType}. " +
             s"See ${getCurrentDirectory}/${outputFile}")
       }
-      pw.println(s"ValidateQueries for ${tableType} tables completed successfully in " +
-          totalTime + " seconds ")
+      pw.println(s"${SnappyTestUtils.logTime} ValidateQueries for ${tableType} tables completed successfully.")
       pw.close()
     } match {
       case Success(v) => pw.close()

From a5ac251b7263be0116d418f3be2130c2cfc3a53c Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Wed, 5 Dec 2018 15:52:36 +0530
Subject: [PATCH 23/33] - Fixing minor issue in northwind test with eviction. -
 Adding thread_id to validation spark app. - Adding check for OOME or JVM
 crash in test run.

---
 .../snappydata/hydra/cluster/SnappyTest.java  | 33 +++++++++++++++++--
 .../hydra/cluster/expectedExceptionList.inc   |  5 +--
 ...onedRowTablesWithEvictionOverflowTest.conf |  2 +-
 .../io/snappydata/hydra/SnappyTestUtils.scala |  2 +-
 .../hydra/ct/ValidateCTQueriesApp.scala       |  5 +--
 .../northwind/ValidateNWQueriesApp.scala      |  4 +--
 6 files changed, 41 insertions(+), 10 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index bb37f50be5..4c782f511c 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -2803,6 +2803,7 @@ public static synchronized void HydraTask_checkSuspectStrings() {
   }
 
   public void checkSuspectStrings() {
+    String hasOOMEORJVMCrash = checkForJVMCrashOrOOME();
     String checkSuspectOutPut = getCurrentDirPath() + File.separator + "suspectStrings.txt";
     File suspectStringFile = new File(checkSuspectOutPut);
     StringBuilder cmd = new StringBuilder();
@@ -2810,9 +2811,9 @@ public void checkSuspectStrings() {
 
     String[] expectedExceptions = SnappyPrms.getExpectedExceptionList();
     if(cycleVms) {
-      List<String> exceptions = Arrays.asList(expectedExceptions);
+      List<String> exceptions = new ArrayList<String>(Arrays.asList(expectedExceptions));
       exceptions.addAll(Arrays.asList(SnappyPrms.getExpectedExceptionListForHA()));
-      expectedExceptions = (String[])exceptions.toArray();
+      expectedExceptions = exceptions.toArray(expectedExceptions);
     }
     for (int i = 0; i < expectedExceptions.length; i++)
       exceptedExcep.append(" | grep -v \"").append(expectedExceptions[i] + "\"");
@@ -2845,9 +2846,37 @@ public void checkSuspectStrings() {
 
       }
     }
+    if(hasOOMEORJVMCrash.length()>0){
+      throw new TestException(hasOOMEORJVMCrash);
+    }
   }
 
 
+  public String checkForJVMCrashOrOOME() {
+    StringBuilder msg = new StringBuilder();
+    String oomeOutPut = getCurrentDirPath() + File.separator + "checkOOME.txt";
+    File oomeOutPutFile = new File(oomeOutPut);
+
+    StringBuilder cmd = new StringBuilder();
+    cmd.append("find " + getCurrentDirPath() + " -type f \\( -name \"*.hprof\" \\)");
+    ProcessBuilder pb = new ProcessBuilder("/bin/bash", "-c", cmd.toString());
+    executeProcess(pb, oomeOutPutFile);
+    if(oomeOutPutFile.length() != 0) {
+      msg.append("There is OOME observed in the test. Please check logs for more details.\n");
+    }
+    cmd.setLength(0);
+    String jvmCrashOutput = getCurrentDirPath() + File.separator + "checkJVMCrash.txt";
+    File jvmCrashOutputFile = new File(jvmCrashOutput);
+    cmd.append("find " + getCurrentDirPath() + " -type f \\( -name \"hs_err*.log\" \\)");
+    pb = new ProcessBuilder("/bin/bash", "-c", cmd.toString());
+    executeProcess(pb, jvmCrashOutputFile);
+    if(jvmCrashOutputFile.length() != 0) {
+      msg.append("There is HOTSPOT error observed in the test. Please check logs for more details" +
+          ". \n");
+    }
+    return msg.toString();
+  }
+
   /**
    * Create and start snappy server.
    */
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc b/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc
index 416e0e5034..d1ad388493 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/expectedExceptionList.inc
@@ -14,6 +14,7 @@ io.snappydata.hydra.cluster.SnappyPrms-expectedExceptions =
 "org.apache.spark.SparkException: Exception thrown in awaitResult"
 "com.gemstone.gemfire.distributed.LockServiceDestroyedException"
 "Failed to retrieve information for"
+"WARN LeadImpl: Exception while starting lead node"
 ;
 
 io.snappydata.hydra.cluster.SnappyPrms-expectedExceptionsForHA =
@@ -22,8 +23,8 @@ io.snappydata.hydra.cluster.SnappyPrms-expectedExceptionsForHA =
 ;
 
 io.snappydata.hydra.cluster.SnappyPrms-knownExceptionsForHA =
-"java.lang.reflect.InvocationTargetException"
-"java.lang.IllegalStateException: Cannot call methods on a stopped SparkContext"
+"java.net.ConnectException: Connection refused"
+"Caused by: org.apache.thrift.transport.TTransportException: Failed to create or configure socket."
 ;
 
 
diff --git a/dtests/src/test/java/io/snappydata/hydra/northwind/nwPersistentPartitionedRowTablesWithEvictionOverflowTest.conf b/dtests/src/test/java/io/snappydata/hydra/northwind/nwPersistentPartitionedRowTablesWithEvictionOverflowTest.conf
index de73b73fb4..11b1d95402 100644
--- a/dtests/src/test/java/io/snappydata/hydra/northwind/nwPersistentPartitionedRowTablesWithEvictionOverflowTest.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/northwind/nwPersistentPartitionedRowTablesWithEvictionOverflowTest.conf
@@ -18,7 +18,7 @@ INITTASK    taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = H
             io.snappydata.hydra.cluster.SnappyPrms-sparkJobClassNames = io.snappydata.hydra.northwind.ValidateNWQueriesApp
             io.snappydata.hydra.cluster.SnappyPrms-userAppArgs = "${dataFilesLocation} ${tableType} ${fullResultSetValidation} ${isSmokeRun} ${numRowsValidation}"
             io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
-            threadGroups = leadThreads
+            threadGroups = snappyThreads
             ;
 
 TASK        taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index 897e506c69..4e52049156 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -185,7 +185,7 @@ object SnappyTestUtils {
           s"${tableType} table.")
       validationFailed = true
     }
-    pw.println(s"${logTime}  Execution completed for query ${queryNum}")
+    pw.println(s"${logTime} Execution completed for query ${queryNum}")
     pw.flush()
     return validationFailed
   }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
index 21a28e18e8..db308c4979 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesApp.scala
@@ -27,15 +27,16 @@ import org.apache.spark.{SparkConf, SparkContext}
 object ValidateCTQueriesApp {
 
   def main(args: Array[String]) {
+
+    val threadID = Thread.currentThread().getId
     val conf = new SparkConf().
-        setAppName("ValidateCTQueriesApp Application_" + System.currentTimeMillis())
+        setAppName("ValidateCTQueriesApp_" + threadID + "_" + System.currentTimeMillis())
     val sc = SparkContext.getOrCreate(conf)
     val sqlContext = SQLContext.getOrCreate(sc)
     val snc = SnappyContext(sc)
 
     def getCurrentDirectory = new java.io.File(".").getCanonicalPath
 
-    val threadID = Thread.currentThread().getId
     val outputFile = "ValidateCTQueriesApp_thread_" + threadID + "_" + System.currentTimeMillis +
         ".out"
     val pw = new PrintWriter(new FileOutputStream(new File(outputFile), true))
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
index 2918f81ba4..bd2b0f9335 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesApp.scala
@@ -26,8 +26,9 @@ import org.apache.spark.{SparkConf, SparkContext}
 object ValidateNWQueriesApp {
 
   def main(args: Array[String]) {
+    val threadID = Thread.currentThread().getId
     val conf = new SparkConf().
-        setAppName("ValidateNWQueries Application_" + System.currentTimeMillis())
+        setAppName("ValidateNWQueriesApplication_" + threadID + "_" + System.currentTimeMillis())
     val sc = SparkContext.getOrCreate(conf)
     val sqlContext = SQLContext.getOrCreate(sc)
     val snc = SnappyContext(sc)
@@ -46,7 +47,6 @@ object ValidateNWQueriesApp {
 
     def getCurrentDirectory = new java.io.File(".").getCanonicalPath
 
-    val threadID = Thread.currentThread().getId
     val outputFile = "ValidateNWQueriesApp_thread_" + threadID + "_" + System.currentTimeMillis +
         ".out"
     val pw = new PrintWriter(new FileOutputStream(new File(outputFile), true));

From b2aca95fa6b2ec06e43db3835fade4dcd4103219 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Mon, 31 Dec 2018 13:53:39 +0530
Subject: [PATCH 24/33] - Some formatting and code refactoring.

---
 .../adAnalytics/SnappyAdAnalyticsTest.java    |  52 +++----
 .../snappydata/hydra/cluster/SnappyTest.java  | 133 +++++++++++++-----
 .../ct/ctSplitModeCreateWithScriptLeadHA.conf |   8 ++
 .../hydra/ct/ValidateCTQueriesJob.scala       |   4 +-
 .../northwind/ValidateNWQueriesJob.scala      |   8 +-
 ...ateNWQueriesWithChangingConstantsApp.scala |  24 ++--
 ...ateNWQueriesWithChangingConstantsJob.scala |  28 ++--
 7 files changed, 159 insertions(+), 98 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/adAnalytics/SnappyAdAnalyticsTest.java b/dtests/src/test/java/io/snappydata/hydra/adAnalytics/SnappyAdAnalyticsTest.java
index 86a5f5fa63..e6c587ca75 100644
--- a/dtests/src/test/java/io/snappydata/hydra/adAnalytics/SnappyAdAnalyticsTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/adAnalytics/SnappyAdAnalyticsTest.java
@@ -307,18 +307,35 @@ protected void executeSnappyStreamingJob(Vector jobClassNames, String logFileNam
         }
         Log.getLogWriter().info("JobID is : " + jobID);
         for (int j = 0; j < 3; j++) {
-          if(!getJobStatus(jobID)){
-            throw new TestException("Got Exception while executing streaming job. Please check " +
-                "the job status output.");
+          try {
+            Thread.sleep(10 * 1000);
+          } catch (InterruptedException ie) {
           }
+          getJobStatus(jobID);
+        }
+        if(!checkJobStatus(jobID)){
+          throw new TestException("Got Exception while executing streaming job. Please check " +
+              "the job status output.");
         }
       }
-
     } catch (IOException e) {
       throw new TestException("IOException occurred while retriving destination logFile path " + log + "\nError Message:" + e.getMessage());
     }
   }
 
+  public void getJobStatus(String jobID) {
+    String snappyJobScript = getScriptLocation("snappy-job.sh");
+    leadHost = getLeadHost();
+    String leadPort = (String) SnappyBB.getBB().getSharedMap().get("primaryLeadPort");
+    String dest = getCurrentDirPath() + File.separator + "jobStatus_" + getMyTid() + "_" + jobID +
+        ".log";
+    File commandOutput = new File(dest);
+    String command = snappyJobScript + " status --lead " + leadHost + ":" + leadPort + " " +
+        "--job-id " + jobID + " > " + commandOutput;
+    ProcessBuilder pb = new ProcessBuilder("/bin/bash", "-c", command);
+    executeProcess(pb, null);
+  }
+
   public static void HydraTask_executeSQLScriptsWithSleep() {
     try { Thread.sleep(30000); } catch (InterruptedException ie) {}
     HydraTask_executeSQLScripts();
@@ -344,33 +361,6 @@ public static void HydraTask_restartSnappyClusterForStreaming(){
    HydraTask_executeSnappyStreamingJob();
   }
 
-  public boolean getJobStatus(String jobID){
-    String snappyJobScript = getScriptLocation("snappy-job.sh");
-    leadHost = getLeadHost();
-    String leadPort = (String) SnappyBB.getBB().getSharedMap().get("primaryLeadPort");
-    try {
-      String dest = getCurrentDirPath() + File.separator + "jobStatus_" + RemoteTestModule
-          .getCurrentThread().getThreadId() + "_" + System.currentTimeMillis() + ".log";
-      File commandOutput = new File(dest);
-      String command = snappyJobScript + " status --lead " + leadHost + ":" + leadPort + " " +
-          "--job-id " + jobID + " > " + commandOutput;
-      ProcessBuilder pb = new ProcessBuilder("/bin/bash", "-c", command);
-      executeProcess(pb, null);
-      String line = null;
-      BufferedReader inputFile = new BufferedReader(new FileReader(commandOutput));
-      while ((line = inputFile.readLine()) != null) {
-        if(line.contains("status") ){
-          if (line.contains("ERROR"))
-              return false;
-          break;
-        }
-      } try { Thread.sleep(10*1000);} catch(InterruptedException ie) { }
-    } catch (IOException ie){
-      Log.getLogWriter().info("Got exception while accessing current dir");
-    }
-    return true;
-  }
-
   /* Generator and Publisher for StringMessageProducer
   */
   public static void HydraTask_generateAndPublishMethod() {
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index 4c782f511c..c6ce5d5657 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -51,7 +51,11 @@
 import sql.SQLPrms;
 import sql.dmlStatements.DMLStmtIF;
 import sql.sqlutil.DMLStmtsFactory;
-import util.*;
+import util.PRObserver;
+import util.StopStartPrms;
+import util.StopStartVMs;
+import util.TestException;
+import util.TestHelper;
 
 public class SnappyTest implements Serializable {
 
@@ -1817,10 +1821,10 @@ public int executeProcess(ProcessBuilder pb, File logFile) {
       int rc = p.waitFor();
       String pbCmd = Arrays.toString(pb.command().toArray());
       if ((rc == 0) || (pbCmd.contains("grep -v -F") && rc == 1)) {
-        Log.getLogWriter().info("Executed successfully");
+        Log.getLogWriter().info("Process executed successfully");
         return 0;
       } else {
-        Log.getLogWriter().info("Failed with exit code: " + rc);
+        Log.getLogWriter().info("Process failed with exit code: " + rc);
         return 1;
       }
     } catch (IOException e) {
@@ -2239,11 +2243,12 @@ public void executeSnappyJob(Vector jobClassNames, String logFileName, String
         snappyTest.executeProcess(pb, logFile);
         pb = new ProcessBuilder("/bin/bash", "-c", curlCommand2);
         int status = snappyTest.executeProcess(pb, logFile);
-        if(status==1 && !cycleVms){
-          throw new TestException("Snappy job execution has failed. Please check the logs.");
-        }
       }
       boolean retry = snappyTest.getSnappyJobsStatus(snappyJobScript, logFile, leadPort);
+      if(!checkJobStatus(getJobIDs(logFile)) && !cycleVms){
+        throw new TestException
+            ("Snappy job execution has failed. Please check the logs.");
+      }
       if (retry && jobSubmissionCount <= SnappyPrms.getRetryCountForJob()) {
         jobSubmissionCount++;
         Thread.sleep(180000);
@@ -2300,7 +2305,6 @@ public String setCDCSparkAppCmds(String userAppArgs, String commonArgs, String s
 
   public void executeSparkJob(Vector jobClassNames, String logFileName) {
     String snappyJobScript = getScriptLocation("spark-submit");
-    boolean isCDCStream = SnappyCDCPrms.getIsCDCStream();
     ProcessBuilder pb = null;
     File log = null, logFile = null;
     userAppJar = SnappyPrms.getUserAppJar();
@@ -2339,9 +2343,6 @@ public void executeSparkJob(Vector jobClassNames, String logFileName) {
         Log.getLogWriter().info("spark-submit command is : " + command);
         pb = new ProcessBuilder("/bin/bash", "-c", command);
         int status = snappyTest.executeProcess(pb, logFile);
-        if(status==1 && !cycleVms){
-          throw new TestException("Spark Application has failed. Please check the logs.");
-        }
         Log.getLogWriter().info("CDC stream is : " + SnappyCDCPrms.getIsCDCStream());
         if (SnappyCDCPrms.getIsCDCStream()) {
           //wait for 2 min until the cdc streams starts off.
@@ -2353,6 +2354,9 @@ public void executeSparkJob(Vector jobClassNames, String logFileName) {
           Log.getLogWriter().info("Inside getIsCDCStream : " + SnappyCDCPrms.getIsCDCStream());
           return;
         }
+        if(status==1 && !cycleVms){
+          throw new TestException("Spark Application has failed. Please check the logs.");
+        }
         String searchString = "Spark ApplicationEnd: ";
         String expression = "cat " + logFile + " | grep -e Exception -e '" + searchString + "' |" +
             " grep -v java.net.BindException" + " | wc -l)\"";
@@ -2481,6 +2485,56 @@ protected void simulateStream() {
     }
   }
 
+  public String getJobIDs(File logFile) {
+    String line = null;
+    String jobIDs = null;
+    try {
+      BufferedReader inputFile = new BufferedReader(new FileReader(logFile));
+      while ((line = inputFile.readLine()) != null) {
+        if (line.contains("jobId")) {
+          String temp  = line.split(":")[1].trim();
+          jobIDs = temp.substring(1, temp.length() - 2);
+          jobIDs += ",";
+        }
+      }
+      inputFile.close();
+    } catch (IOException ie) {
+      throw new TestException("Exception while reading job status file.");
+    }
+    if (jobIDs == null) {
+      throw new TestException("Failed to start the snappy job. Please check the logs.");
+    }
+    Log.getLogWriter().info("JobID is : " + jobIDs);
+    return jobIDs;
+  }
+
+  public boolean checkJobStatus(String jobIDs){
+    String[] jobIDArr = jobIDs.split(",");
+    for(int i = 0; i< jobIDArr.length; i++){
+      try {
+        String dest = getCurrentDirPath() + File.separator + "jobStatus_" + getMyTid() + "_" +
+            jobIDArr[i] + ".log";
+        File commandOutput = new File(dest);
+        String line = null;
+        BufferedReader inputFile = new BufferedReader(new FileReader(commandOutput));
+        while ((line = inputFile.readLine()) != null) {
+          if (line.contains("status")) {
+            if (line.contains("ERROR"))
+              return false;
+            break;
+          }
+        }
+        try {
+          Thread.sleep(10 * 1000);
+        } catch (InterruptedException ie) {
+        }
+      } catch (IOException ie) {
+        Log.getLogWriter().info("Got exception while accessing current dir", ie);
+      }
+    }
+    return true;
+  }
+
   public boolean getSnappyJobsStatus(String snappyJobScript, File logFile, String leadPort) {
     boolean found = false;
     try {
@@ -2498,8 +2552,8 @@ public boolean getSnappyJobsStatus(String snappyJobScript, File logFile, String
       inputFile.close();
       for (String str : jobIds) {
         File log = new File(".");
-        String dest = log.getCanonicalPath() + File.separator + "jobStatus_" + RemoteTestModule
-            .getCurrentThread().getThreadId() + "_" + System.currentTimeMillis() + ".log";
+        String dest = log.getCanonicalPath() + File.separator + "jobStatus_" + getMyTid()
+            + "_" + str + ".log";
         File commandOutput = new File(dest);
         String expression = snappyJobScript + " status --lead " + leadHost + ":" + leadPort + " " +
             "--job-id " + str + " > " + commandOutput + " 2>&1 ; grep -e '\"status\": " +
@@ -2772,7 +2826,6 @@ public static synchronized void HydraTask_createAndStartSnappyLocator() {
   public static synchronized void HydraTask_dumpStacks() {
     snappyTest = new SnappyTest();
     snappyTest.getClientHostDescription();
-    int tid = RemoteTestModule.getCurrentThread().getThreadId();
     snappyTest.dumpStacks();
   }
 
@@ -2783,9 +2836,12 @@ public void dumpStacks() {
     if (checkErrors || checkHang) {
       int dumpItr = SnappyPrms.getNumOfStackDumpItrs();
       Set pids = getPidList();
+      Log.getLogWriter().info("Dump stacks for " + dumpItr + " iterations.");
       for (int i = 0; i < dumpItr; i++) {
+        Log.getLogWriter().info("Dumping stacks for iteration " + i);
         getThreadDump(pids);
         if (i < (dumpItr - 1)) {
+          Log.getLogWriter().info("Sleeping before next thread dump...");
           sleepForMs(SnappyPrms.getSleepBtwnStackDumps());
         }
       }
@@ -3337,34 +3393,37 @@ protected void getThreadDump(Set<String> pidList) {
     Process pr = null;
     ProcessBuilder pb;
     File logFile = new File(thrDumpScript);
-    String thrDumpLog = getCurrentDirPath() + File.separator + "threadDump.log";
-    File thrDumpOutput = new File(thrDumpLog);
     try {
-      FileWriter fw = new FileWriter(logFile.getAbsoluteFile(), true);
-      BufferedWriter bw = new BufferedWriter(fw);
-      for (String pidString : pidList) {
-        int pid = Integer.parseInt(pidString);
-        String host = snappyTest.getPidHost(Integer.toString(pid));
-        if (host.equalsIgnoreCase("localhost")) {
-          bw.write("kill -23 " + pid);
-          bw.newLine();
-          bw.write("sleep 4;");
-          bw.newLine();
-          bw.write("kill -3 " + pid);
-        } else {
-          bw.write("ssh -n -x -o PasswordAuthentication=no -o StrictHostKeyChecking=no " +
-              host + " kill -23 " + pid);
-          bw.newLine();
-          bw.write("sleep 4;");
+      if (!logFile.exists()) {
+        FileWriter fw = new FileWriter(logFile.getAbsoluteFile(), true);
+        BufferedWriter bw = new BufferedWriter(fw);
+        for (String pidString : pidList) {
+          int pid = Integer.parseInt(pidString);
+          String host = snappyTest.getPidHost(Integer.toString(pid));
+          if (host.equalsIgnoreCase("localhost")) {
+            bw.write("kill -23 " + pid);
+            bw.newLine();
+            bw.write("sleep 4;");
+            bw.newLine();
+            bw.write("kill -3 " + pid);
+          } else {
+            bw.write("ssh -n -x -o PasswordAuthentication=no -o StrictHostKeyChecking=no " +
+                host + " kill -23 " + pid);
+            bw.newLine();
+            bw.write("sleep 4;");
+            bw.newLine();
+            bw.write("ssh -n -x -o PasswordAuthentication=no -o StrictHostKeyChecking=no " +
+                host + " kill -3 " + pid);
+          }
           bw.newLine();
-          bw.write("ssh -n -x -o PasswordAuthentication=no -o StrictHostKeyChecking=no " +
-              host + " kill -3 " + pid);
         }
-        bw.newLine();
+        bw.close();
+        fw.close();
+        logFile.setExecutable(true);
       }
-      bw.close();
-      fw.close();
-      logFile.setExecutable(true);
+      String thrDumpLog = getCurrentDirPath() + File.separator + "threadDump.log";
+      File thrDumpOutput = new File(thrDumpLog);
+
       pb = new ProcessBuilder(thrDumpScript);
       pb.redirectErrorStream(true);
       pb.redirectOutput(ProcessBuilder.Redirect.appendTo(thrDumpOutput));
diff --git a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithScriptLeadHA.conf b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithScriptLeadHA.conf
index f728055197..ba207d2f8f 100644
--- a/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithScriptLeadHA.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/ct/ctSplitModeCreateWithScriptLeadHA.conf
@@ -1,6 +1,14 @@
 hydra.Prms-testRequirement += " with lead HA.";
 hydra.Prms-testDescription += " When the tasks are being peformed, a lead node will be restarted.";
 
+CLOSETASK  taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSparkJob
+            io.snappydata.hydra.cluster.SnappyPrms-sparkJobClassNames = io.snappydata.hydra.ct.ValidateCTQueriesApp
+            io.snappydata.hydra.cluster.SnappyPrms-userAppArgs = "${dataFilesLocation} ${tableType} ${fullResultSetValidation} ${numRowsValidation}"
+            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
+            io.snappydata.hydra.cluster.SnappyPrms-executorMemory = 4g
+            maxThreads = 1
+            threadGroups = snappyStoreThreads
+            ;
 INCLUDE $JTESTS/io/snappydata/hydra/northwind/leadHA.inc;
 INCLUDE $JTESTS/io/snappydata/hydra/ct/ctSplitModeCreateWithScript.conf;
 INCLUDE $JTESTS/io/snappydata/hydra/northwind/backUpLeadConfig.inc;
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
index 3cff0c39ec..3b6d0ca40b 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ct/ValidateCTQueriesJob.scala
@@ -75,9 +75,9 @@ class ValidateCTQueriesJob extends SnappySQLJob {
       pw.close()
     } match {
       case Success(v) => pw.close()
-        s"See ${getCurrentDirectory}/${outputFile}"
+        s"Validation passed. See ${getCurrentDirectory}/${outputFile}"
       case Failure(e) => pw.close();
-        throw e;
+        throw new TestException(s"Validation failed. See ${getCurrentDirectory}/${outputFile}");
     }
   }
 
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
index d9f34a0b08..597454f0a0 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesJob.scala
@@ -78,9 +78,11 @@ class ValidateNWQueriesJob extends SnappySQLJob {
       pw.close()
     } match {
       case Success(v) => pw.close()
-        s"See ${getCurrentDirectory}/${outputFile}"
-      case Failure(e) => pw.close();
-        throw e;
+        s"Validation passed. See ${getCurrentDirectory}/${outputFile}"
+        throw new Exception()
+      case Failure(e) =>
+        pw.close();
+        throw new TestException(s"Validation failed. See ${getCurrentDirectory}/${outputFile}");
     }
   }
 
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsApp.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsApp.scala
index f2a44672ae..7de9867ae8 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsApp.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsApp.scala
@@ -41,36 +41,38 @@ object ValidateNWQueriesWithChangingConstantsApp {
     SnappyTestUtils.validateFullResultSet = true
     SnappyTestUtils.numRowsValidation = true
     SnappyTestUtils.tableType = tableType
+
     def getCurrentDirectory = new java.io.File(".").getCanonicalPath
+
     val outputFile = "ValidateNWQueriesApp_thread_" + threadID + "_" + System.currentTimeMillis +
         ".out"
     val pw = new PrintWriter(new FileOutputStream(new File(outputFile), true));
     // scalastyle:off println
     var startTime = System.currentTimeMillis()
-    pw.println(s"createAndLoadSparkTables Test started at : " + startTime)
+    pw.println(s"${SnappyTestUtils.logTime} createAndLoadSparkTables started.. ")
     NWTestUtil.createAndLoadSparkTables(sqlContext)
     var finishTime = System.currentTimeMillis()
-    var totalTime = (finishTime -startTime)/1000
-    pw.println(s"createAndLoadSparkTables completed successfully in :" + totalTime + " secs.")
+    var totalTime = (finishTime - startTime) / 1000
+    pw.println(s"${SnappyTestUtils.logTime} createAndLoadSparkTables completed successfully in "
+        + s"$totalTime secs.")
     pw.flush()
-    pw.println(s"ValidateQueriesFullResultSet for ${tableType} tables Queries Test started at" +
-        s" :  " + System.currentTimeMillis)
+    pw.println(s"${SnappyTestUtils.logTime} Validation for ${tableType} tables queries started..")
     startTime = System.currentTimeMillis()
     val failedQueries: String = NWTestUtil.executeAndValidateQueriesByChangingConstants(snc,
       tableType, pw, sqlContext)
     finishTime = System.currentTimeMillis()
-    totalTime = (finishTime - startTime)/1000
+    totalTime = (finishTime - startTime) / 1000
     if (!failedQueries.isEmpty) {
       println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. See " +
           s"${getCurrentDirectory}/${outputFile}")
-      pw.println(s"Total execution took ${totalTime} seconds.")
-      pw.println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. ")
+      pw.println(s"${SnappyTestUtils.logTime} Total execution took ${totalTime} seconds.")
+      pw.println(s"${SnappyTestUtils.logTime} Validation failed for ${tableType} tables for " +
+          s"queries ${failedQueries}. ")
       pw.close()
-      throw new Exception(s"Validation task failed for ${tableType} tables. See " +
+      throw new Exception(s"Validation failed for ${tableType} tables. See " +
           s"${getCurrentDirectory}/${outputFile}")
     }
-      pw.println(s"ValidateQueries for ${tableType} tables Test completed successfully in : " +
-          totalTime + " secs.")
+    pw.println(s"ValidateQueries for $tableType tables completed successfully in $totalTime secs.")
     pw.close()
   }
 }
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsJob.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsJob.scala
index ed5da73f4b..95550154a2 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/ValidateNWQueriesWithChangingConstantsJob.scala
@@ -49,37 +49,37 @@ class ValidateNWQueriesWithChangingConstantsJob extends SnappySQLJob {
       NWQueries.dataFilesLocation = dataFilesLocation
       // scalastyle:off println
       var startTime = System.currentTimeMillis()
-      pw.println(s"createAndLoadSparkTables Test started at : " + startTime)
+      pw.println(s"${SnappyTestUtils.logTime} createAndLoadSparkTables started.")
       NWTestUtil.createAndLoadSparkTables(sqlContext)
       var finishTime = System.currentTimeMillis()
-      var totalTime = (finishTime -startTime)/1000
-      pw.println(s"createAndLoadSparkTables completed successfully in :" + totalTime + " secs.")
+      var totalTime = (finishTime - startTime) / 1000
+      pw.println(s"${SnappyTestUtils.logTime} createAndLoadSparkTables completed successfully in " +
+          s"$totalTime secs.")
       pw.flush()
-      pw.println(s"ValidateQueriesFullResultSet for ${tableType} tables Queries Test started at" +
-          s" :  " + System.currentTimeMillis)
+      pw.println(s"${SnappyTestUtils.logTime} Validation for ${tableType} tables queries started..")
       startTime = System.currentTimeMillis()
       val failedQueries: String = NWTestUtil.executeAndValidateQueriesByChangingConstants(snc,
         tableType, pw, sqlContext)
       finishTime = System.currentTimeMillis()
-      totalTime = (finishTime -startTime)/1000
-            if (!failedQueries.isEmpty) {
+      totalTime = (finishTime - startTime) / 1000
+      if (!failedQueries.isEmpty) {
         println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. " +
             s"See ${getCurrentDirectory}/${outputFile}")
-        pw.println(s"Total execution took ${totalTime} seconds.")
-        pw.println(s"Validation failed for ${tableType} tables for queries ${failedQueries}. ")
+        pw.println(s"${SnappyTestUtils.logTime} Total execution took ${totalTime} seconds.")
+        pw.println(s"${SnappyTestUtils.logTime} Validation failed for ${tableType} tables for " +
+            s"queries ${failedQueries}. ")
         pw.close()
-        throw new TestException(s"Validation task failed for ${tableType}. " +
+        throw new TestException(s"Validation failed for ${tableType}. " +
             s"See ${getCurrentDirectory}/${outputFile}")
       }
-      pw.println(s"ValidateQueries for ${tableType} tables Test completed  " +
-          s"successfully in : " + totalTime + " secs.")
-
+      pw.println(s"${SnappyTestUtils.logTime} ValidateQueries for $tableType tables completed " +
+          s"successfully in $totalTime secs.")
       pw.close()
     } match {
       case Success(v) => pw.close()
         s"See ${getCurrentDirectory}/${outputFile}"
       case Failure(e) => pw.close();
-        throw e;
+        throw new TestException(s"Validation failed. See ${getCurrentDirectory}/${outputFile}");
     }
   }
 

From 32cb9ecb7989627a03ace23c7d8a3c8f5480156e Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Thu, 17 Jan 2019 12:53:48 +0530
Subject: [PATCH 25/33] - Adding test cases for snappyJob and spark App failure
 and hang.

---
 ...thoutDMLOPsJDBCClient_ColocatedTables.conf |  1 -
 ...tWithoutDMLOPsJDBCClient_ColumnTables.conf |  1 -
 ...ithoutDMLOPsJDBCClient_PartitionedRow.conf |  1 -
 ...WithoutDMLOPsJDBCClient_ReplicatedRow.conf |  1 -
 .../testEndToEndValidation.bt                 | 23 ++++++++
 .../testSnappyJobFailure.conf                 | 18 +++++++
 .../testSnappyJobHang.conf                    | 19 +++++++
 .../testSparkAppFailure.conf                  | 18 +++++++
 .../testSparkAppHang.conf                     | 19 +++++++
 .../ValidateFailureScenarioApp.scala          | 38 +++++++++++++
 .../ValidateFailureScenarioJob.scala          | 52 ++++++++++++++++++
 .../ValidateHangScenarioApp.scala             | 43 +++++++++++++++
 .../ValidateHangScenarioJob.scala             | 53 +++++++++++++++++++
 13 files changed, 283 insertions(+), 4 deletions(-)
 create mode 100644 dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testEndToEndValidation.bt
 create mode 100644 dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSnappyJobFailure.conf
 create mode 100644 dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSnappyJobHang.conf
 create mode 100644 dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSparkAppFailure.conf
 create mode 100644 dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSparkAppHang.conf
 create mode 100644 dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateFailureScenarioApp.scala
 create mode 100644 dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateFailureScenarioJob.scala
 create mode 100644 dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateHangScenarioApp.scala
 create mode 100644 dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateHangScenarioJob.scala

diff --git a/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_ColocatedTables.conf b/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_ColocatedTables.conf
index 010a17bba6..bac76878ea 100644
--- a/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_ColocatedTables.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_ColocatedTables.conf
@@ -6,5 +6,4 @@ INCLUDE $JTESTS/io/snappydata/hydra/snapshotIsolation/colocatedTablesExtn.inc;
 
 //task for running select queries
 TASK        taskClass   = io.snappydata.hydra.snapshotIsolation.SnapshotIsolationTest taskMethod  = HydraTask_executeQueries
-            maxThreads = 1
             threadGroups = leadThreads,snappyThreads,snappyStoreThreads;
diff --git a/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_ColumnTables.conf b/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_ColumnTables.conf
index ed1ca179a2..fd1d2c899d 100644
--- a/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_ColumnTables.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_ColumnTables.conf
@@ -6,5 +6,4 @@ INCLUDE $JTESTS/io/snappydata/hydra/snapshotIsolation/columnTableExtn.inc;
 
 //task for running select queries
 TASK        taskClass   = io.snappydata.hydra.snapshotIsolation.SnapshotIsolationTest taskMethod  = HydraTask_executeQueries
-            maxThreads = 1
             threadGroups = leadThreads,snappyThreads,snappyStoreThreads;
diff --git a/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_PartitionedRow.conf b/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_PartitionedRow.conf
index 0ea888ec54..d81c17072c 100644
--- a/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_PartitionedRow.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_PartitionedRow.conf
@@ -6,5 +6,4 @@ INCLUDE $JTESTS/io/snappydata/hydra/snapshotIsolation/partitionedRowExtn.inc;
 
 //task for running select queries
 TASK        taskClass   = io.snappydata.hydra.snapshotIsolation.SnapshotIsolationTest taskMethod  = HydraTask_executeQueries
-            maxThreads = 1
             threadGroups = leadThreads,snappyThreads,snappyStoreThreads;
diff --git a/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_ReplicatedRow.conf b/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_ReplicatedRow.conf
index c63bb0081c..0cc8d25059 100644
--- a/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_ReplicatedRow.conf
+++ b/dtests/src/test/java/io/snappydata/hydra/snapshotIsolation/testSnapshotWithoutDMLOPsJDBCClient_ReplicatedRow.conf
@@ -6,5 +6,4 @@ INCLUDE $JTESTS/io/snappydata/hydra/snapshotIsolation/replicatedRowExtn.inc;
 
 //task for running select queries
 TASK        taskClass   = io.snappydata.hydra.snapshotIsolation.SnapshotIsolationTest taskMethod  = HydraTask_executeQueries
-            maxThreads = 1
             threadGroups = leadThreads,snappyThreads,snappyStoreThreads;
diff --git a/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testEndToEndValidation.bt b/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testEndToEndValidation.bt
new file mode 100644
index 0000000000..493473137e
--- /dev/null
+++ b/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testEndToEndValidation.bt
@@ -0,0 +1,23 @@
+io/snappydata/hydra/testEndToEndValidation/testSnappyJobFailure.conf
+  A=snappyStore snappyStoreHosts=1 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=1 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=1 workerVMsPerHost=1 workerThreadsPerVM=1
+
+io/snappydata/hydra/testEndToEndValidation/testSnappyJobHang.conf
+  A=snappyStore snappyStoreHosts=1 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=1 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=1 workerVMsPerHost=1 workerThreadsPerVM=1
+
+io/snappydata/hydra/testEndToEndValidation/testSparkAppFailure.conf
+  A=snappyStore snappyStoreHosts=1 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=1 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=1 workerVMsPerHost=1 workerThreadsPerVM=1
+
+io/snappydata/hydra/testEndToEndValidation/testSparkAppHang.conf
+  A=snappyStore snappyStoreHosts=1 snappyStoreVMsPerHost=1 snappyStoreThreadsPerVM=2
+  B=lead leadHosts=1 leadVMsPerHost=1 leadThreadsPerVM=2
+  C=locator locatorHosts=1 locatorVMsPerHost=1 locatorThreadsPerVM=1
+  D=worker workerHosts=1 workerVMsPerHost=1 workerThreadsPerVM=1
diff --git a/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSnappyJobFailure.conf b/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSnappyJobFailure.conf
new file mode 100644
index 0000000000..9addaffb20
--- /dev/null
+++ b/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSnappyJobFailure.conf
@@ -0,0 +1,18 @@
+hydra.Prms-testRequirement = "Test to verify hydra reports failure when the snappy job fails";
+hydra.Prms-testDescription = "
+This test starts the snappy cluster and spark cluster.
+Test then runs the snappy job that throws a TestException and eventually causes the hydra test to fail.";
+
+INCLUDE $JTESTS/io/snappydata/hydra/northwind/startDualModeCluster.conf;
+
+TASK    taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
+            io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.hydra.testEndToEndValidation.ValidateFailureScenarioJob
+            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
+            threadGroups = snappyThreads
+            maxTimesToRun = 1
+            ;
+
+
+INCLUDE $JTESTS/io/snappydata/hydra/northwind/stopDualModeCluster.conf;
+hydra.Prms-totalTaskTimeSec           = 100;
+hydra.Prms-maxResultWaitSec           = 120;
\ No newline at end of file
diff --git a/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSnappyJobHang.conf b/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSnappyJobHang.conf
new file mode 100644
index 0000000000..cffb0705c5
--- /dev/null
+++ b/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSnappyJobHang.conf
@@ -0,0 +1,19 @@
+hydra.Prms-testRequirement = "Test to verify hydra reports failure when the snappy job hangs";
+hydra.Prms-testDescription = "
+This test starts the snappy cluster and spark cluster.
+Test then runs the snappy job that does not complete execution before hydra times out and
+eventually causes the hydra test to fail.";
+
+INCLUDE $JTESTS/io/snappydata/hydra/northwind/startDualModeCluster.conf;
+
+TASK    taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSnappyJob
+            io.snappydata.hydra.cluster.SnappyPrms-jobClassNames = io.snappydata.hydra.testEndToEndValidation.ValidateHangScenarioJob
+            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
+            threadGroups = snappyThreads
+            maxTimesToRun = 1
+            ;
+
+INCLUDE $JTESTS/io/snappydata/hydra/northwind/stopDualModeCluster.conf;
+
+hydra.Prms-totalTaskTimeSec           = 100;
+hydra.Prms-maxResultWaitSec           = 120;
\ No newline at end of file
diff --git a/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSparkAppFailure.conf b/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSparkAppFailure.conf
new file mode 100644
index 0000000000..15054e7b38
--- /dev/null
+++ b/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSparkAppFailure.conf
@@ -0,0 +1,18 @@
+hydra.Prms-testRequirement = "Test to verify hydra reports failure when the spark app fails";
+hydra.Prms-testDescription = "
+This test starts the snappy cluster and spark cluster.
+Test then runs a spark app that throws an Exception and eventually causes the hydra test to fail.";
+
+INCLUDE $JTESTS/io/snappydata/hydra/northwind/startDualModeCluster.conf;
+
+TASK    taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSparkJob
+            io.snappydata.hydra.cluster.SnappyPrms-sparkJobClassNames = io.snappydata.hydra.testEndToEndValidation.ValidateFailureScenarioApp
+            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
+            threadGroups = snappyThreads
+            maxTimesToRun = 1
+            ;
+
+INCLUDE $JTESTS/io/snappydata/hydra/northwind/stopDualModeCluster.conf;
+
+hydra.Prms-totalTaskTimeSec           = 100;
+hydra.Prms-maxResultWaitSec           = 120;
\ No newline at end of file
diff --git a/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSparkAppHang.conf b/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSparkAppHang.conf
new file mode 100644
index 0000000000..9c70f28440
--- /dev/null
+++ b/dtests/src/test/java/io/snappydata/hydra/testEndToEndValidation/testSparkAppHang.conf
@@ -0,0 +1,19 @@
+hydra.Prms-testRequirement = "Test to verify hydra reports failure when the spark app hangs";
+hydra.Prms-testDescription = "
+This test starts the snappy cluster and spark cluster.
+Test then runs a spark app that does not complete execution before hydra times out and
+ eventually causes the hydra test to fail.";
+
+INCLUDE $JTESTS/io/snappydata/hydra/northwind/startDualModeCluster.conf;
+
+TASK    taskClass   = io.snappydata.hydra.cluster.SnappyTest taskMethod  = HydraTask_executeSparkJob
+            io.snappydata.hydra.cluster.SnappyPrms-sparkJobClassNames = io.snappydata.hydra.testEndToEndValidation.ValidateHangScenarioApp
+            io.snappydata.hydra.cluster.SnappyPrms-userAppJar = snappydata-store-scala-tests*tests.jar
+            threadGroups = snappyThreads
+            maxTimesToRun = 1
+            ;
+
+INCLUDE $JTESTS/io/snappydata/hydra/northwind/stopDualModeCluster.conf;
+
+hydra.Prms-totalTaskTimeSec           = 100;
+hydra.Prms-maxResultWaitSec           = 120;
diff --git a/dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateFailureScenarioApp.scala b/dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateFailureScenarioApp.scala
new file mode 100644
index 0000000000..ff778b5498
--- /dev/null
+++ b/dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateFailureScenarioApp.scala
@@ -0,0 +1,38 @@
+
+/*
+ * Copyright (c) 2016 SnappyData, Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+package io.snappydata.hydra.testEndToEndValidation
+
+import java.io.{File, FileOutputStream, PrintWriter}
+
+import org.apache.spark.sql._
+import org.apache.spark.{SparkConf, SparkContext}
+
+object ValidateFailureScenarioApp {
+  def main(args: Array[String]) {
+    val conf = new SparkConf().
+        setAppName("TestHangInSparkApp_" + System.currentTimeMillis())
+    val sc = SparkContext.getOrCreate(conf)
+    val snc = SnappyContext(sc)
+    val pw: PrintWriter
+    = new PrintWriter(new FileOutputStream(new File("testAppFailure.out")), true)
+    // scalastyle:off println
+    pw.println("Throwing test exception...")
+    pw.flush()
+    throw new Exception("Throwing test exception...")
+  }
+}
diff --git a/dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateFailureScenarioJob.scala b/dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateFailureScenarioJob.scala
new file mode 100644
index 0000000000..8279794c03
--- /dev/null
+++ b/dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateFailureScenarioJob.scala
@@ -0,0 +1,52 @@
+
+/*
+ * Copyright (c) 2016 SnappyData, Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+package io.snappydata.hydra.testEndToEndValidation
+
+import java.io.{File, FileOutputStream, PrintWriter}
+
+import scala.util.{Failure, Success, Try}
+
+import com.typesafe.config.Config
+import util.TestException
+
+import org.apache.spark.sql._
+
+class ValidateFailureScenarioJob extends SnappySQLJob {
+  override def runSnappyJob(snSession: SnappySession, jobConfig: Config): Any = {
+    val snc = snSession.sqlContext
+    val pw: PrintWriter
+    = new PrintWriter(new FileOutputStream(new File("testJobFailure.out")), true)
+    Try {
+      // scalastyle:off println
+      pw.println("Throwing test exception...")
+      pw.flush()
+      throw new TestException("Throwing test exception...")
+    } match {
+      case Success(v) => pw.close()
+        s"See logs"
+      case Failure(e) =>
+        pw.println("Exception occurred while executing the job "
+            + "\nError Message:" + e.getMessage)
+        pw.close();
+        throw new TestException(s"Job failed. Check logs.");
+    }
+    pw.close();
+  }
+
+  override def isValidJob(sc: SnappySession, config: Config): SnappyJobValidation = SnappyJobValid()
+}
diff --git a/dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateHangScenarioApp.scala b/dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateHangScenarioApp.scala
new file mode 100644
index 0000000000..6e459fb625
--- /dev/null
+++ b/dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateHangScenarioApp.scala
@@ -0,0 +1,43 @@
+
+/*
+ * Copyright (c) 2018 SnappyData, Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+package io.snappydata.hydra.testEndToEndValidation
+
+import java.io.{File, FileOutputStream, PrintWriter}
+
+import org.apache.spark.sql._
+import org.apache.spark.{SparkConf, SparkContext}
+
+object ValidateHangScenarioApp {
+  def main(args: Array[String]) {
+    val conf = new SparkConf().
+        setAppName("TestFailureInSparkApp_" + System.currentTimeMillis())
+    val sc = SparkContext.getOrCreate(conf)
+    val snc = SnappyContext(sc)
+
+    def getCurrentDirectory = new java.io.File(".").getCanonicalPath
+
+    val outputFile = "testAppHang.out"
+    val pw = new PrintWriter(new FileOutputStream(new File(outputFile), true));
+    // scalastyle:off println
+    pw.println("Thread goes to sleep now...")
+    pw.flush()
+    Thread.sleep(300 * 1000)
+    pw.println("Done sleeping")
+    pw.flush()
+  }
+}
diff --git a/dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateHangScenarioJob.scala b/dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateHangScenarioJob.scala
new file mode 100644
index 0000000000..40f57c5a38
--- /dev/null
+++ b/dtests/src/test/scala/io/snappydata/hydra/testEndToEndValidation/ValidateHangScenarioJob.scala
@@ -0,0 +1,53 @@
+
+/*
+ * Copyright (c) 2018 SnappyData, Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+package io.snappydata.hydra.testEndToEndValidation
+
+import java.io.{File, FileOutputStream, PrintWriter}
+
+import util.TestException
+import com.typesafe.config.Config
+import io.snappydata.hydra.{SnappyTestUtils, northwind}
+
+import org.apache.spark.SparkContext
+import org.apache.spark.sql._
+import scala.util.{Failure, Success, Try}
+
+class ValidateHangScenarioJob extends SnappySQLJob {
+  override def runSnappyJob(snappySession: SnappySession, jobConfig: Config): Any = {
+    val snc = snappySession.sqlContext
+
+    def getCurrentDirectory = new java.io.File(".").getCanonicalPath
+    val outputFile = "testJobHang.out"
+    val pw = new PrintWriter(new FileOutputStream(new File(outputFile), true));
+    Try {
+      // scalastyle:off println
+      pw.println("Thread goes to sleep now...")
+      pw.flush()
+      Thread.sleep(300*1000)
+      pw.println("Done sleeping")
+      pw.flush()
+    } match {
+      case Success(v) => pw.close()
+        s"See ${getCurrentDirectory}/${outputFile}"
+      case Failure(e) => pw.close();
+        throw e;
+    }
+  }
+
+  override def isValidJob(sc: SnappySession, config: Config): SnappyJobValidation = SnappyJobValid()
+}

From 54baa7e1783104f7523aa23d8b15a3cc7479a7e7 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@snappydata.io>
Date: Thu, 7 Feb 2019 12:54:35 +0530
Subject: [PATCH 26/33] - Adding method for changing log level at runtime. In
 case of failure, change the log-level to debug and re-run task. - Removing
 the test flag for setting plan caching to false in case of embedded and
 smart connector mode, as the product default is set false.

---
 .../snappydata/hydra/cluster/SnappyPrms.java  |  33 +++
 .../snappydata/hydra/cluster/SnappyTest.java  |  27 +++
 .../io/snappydata/hydra/SnappyTestUtils.scala |  52 +----
 .../hydra/ao/ConvertToParquet.scala           |  81 +++++++
 .../io/snappydata/hydra/ao/LoadData.scala     |  19 +-
 .../hydra/northwind/NWTestUtil.scala          | 200 +++++++++---------
 6 files changed, 262 insertions(+), 150 deletions(-)
 create mode 100644 dtests/src/test/scala/io/snappydata/hydra/ao/ConvertToParquet.scala

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
index c031b9c016..d72120d557 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
@@ -630,6 +630,39 @@ public static int getNumOfStackDumpItrs(){
     return tasktab().intAt(key, tab().intAt(key, 3));
   }
 
+  /**
+   * Parameter to enable re-running a failed snappy job with debug logging turned on.
+   */
+
+  public static Long reRunWithDebugLogging;
+
+  public static boolean isReRunWithDebugEnabled() {
+    Long key = reRunWithDebugLogging;
+    return tasktab().booleanAt(key, tab().booleanAt(key, false));
+
+  }
+  /**
+   * Parameter specifying the logger whose log level settings should be changed.
+   */
+  public static Long logger;
+
+  public static String getLogger(){
+    Long key = logger;
+    return tasktab().stringAt(key, tab().stringAt(key, ""));
+  }
+
+  /**
+   * Parameter to specify the required log level setting.
+   */
+
+  public static Long newLogLevel;
+
+  public static String getNewLogLevel(){
+    Long key = newLogLevel;
+    return tasktab().stringAt(key, tab().stringAt(key, "CONFIG"));
+  }
+
+
   /**
    * Parameter used to get the List of expected exceptions in the test run.
    * Array of values for expected exceptions
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index 74a1ad98bf..1fb4886a40 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -23,6 +23,7 @@
 import java.rmi.RemoteException;
 import java.sql.Connection;
 import java.sql.DriverManager;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.text.SimpleDateFormat;
@@ -2250,6 +2251,11 @@ public void executeSnappyJob(Vector jobClassNames, String logFileName, String
       }
       boolean retry = snappyTest.getSnappyJobsStatus(snappyJobScript, logFile, leadPort);
       if(!checkJobStatus(getJobIDs(logFile)) && !cycleVms){
+        if(SnappyPrms.isReRunWithDebugEnabled())
+        {
+          HydraTask_changeLogLevel();
+          HydraTask_executeSnappyJob();
+        }
         throw new TestException
             ("Snappy job execution has failed. Please check the logs.");
       }
@@ -3767,6 +3773,27 @@ public List<String> getHostNameFromConf(String nodeName) {
     return hostNames;
   }
 
+  public static void HydraTask_changeLogLevel() {
+    setLogLevel(SnappyPrms.getLogger(), SnappyPrms.getNewLogLevel());
+  }
+
+  public static void setLogLevel(String logger, String loglevel){
+    Connection conn = null;
+    try {
+      conn = getLocatorConnection();
+    } catch (SQLException se) {
+      throw new TestException("Got exception while getting connection", se);
+    }
+    try {
+      PreparedStatement ps = conn.prepareStatement("call sys.set_log_level(?,?)");
+      ps.setString(0, logger);
+      ps.setString(1, loglevel);
+      ps.execute();
+    } catch(SQLException se) {
+      throw new TestException("Got exception while executing set log level procedure.", se);
+    }
+  }
+
   protected void startSnappyLocator() {
     File log = null;
     List<String> hostNames = getHostNameFromConf("locators");
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index e529e9a20e..a6662b9708 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -41,44 +41,26 @@ object SnappyTestUtils {
   Executes the join query, matches only the full result with expected result, returns false if the
   query validation has failed.
   */
-  def assertJoin(snc: SnappyContext, sqlString: String, queryNum: String, pw: PrintWriter,
-      sqlContext: SQLContext, usePlanCaching: Boolean): Boolean = {
-    var validationFailed = false
-    numRowsValidation = false
-    validationFailed = assertJoin(snc, sqlString, 0, queryNum, pw, sqlContext, usePlanCaching)
-    return validationFailed
-  }
-
   def assertJoin(snc: SnappyContext, sqlString: String, queryNum: String, pw: PrintWriter,
       sqlContext: SQLContext): Boolean = {
     var validationFailed = false
     numRowsValidation = false
-    validationFailed = assertJoin(snc, sqlString, 0, queryNum, pw, sqlContext, true)
+    validationFailed = assertJoin(snc, sqlString, 0, queryNum, pw, sqlContext)
     return validationFailed
   }
 
-
   /*
   Executes the join query, matches the result with expected result, returns false if the query
   validation has failed.
   */
-  def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String, pw:
-  PrintWriter, sqlContext: SQLContext): Boolean = {
-    var validationFailed = false
-    numRowsValidation = true
-    validationFailed = assertJoin(snc, sqlString, numRows, queryNum, pw, sqlContext, true)
-    return validationFailed
-  }
-
   def assertJoin(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
-      pw: PrintWriter, sqlContext: SQLContext, usePlanCaching: Boolean): Boolean = {
+      pw: PrintWriter, sqlContext: SQLContext): Boolean = {
     var validationFailed = false
     snc.sql("set spark.sql.crossJoin.enabled = true")
     if (validateFullResultSet) {
       sqlContext.sql("set spark.sql.crossJoin.enabled = true")
     }
-    validationFailed = assertQuery(snc, sqlString, numRows, queryNum, pw, sqlContext,
-      usePlanCaching)
+    validationFailed = assertQuery(snc, sqlString, numRows, queryNum, pw, sqlContext)
     return validationFailed
   }
 
@@ -86,37 +68,21 @@ object SnappyTestUtils {
    Executes the query, matches only the full resultSet with expected result, returns false if the
    query validation has failed.
    */
-  def assertQuery(snc: SnappyContext, sqlString: String, queryNum: String,
-      pw: PrintWriter, sqlContext: SQLContext, usePlanCaching: Boolean): Boolean = {
-    numRowsValidation = false
-    assertQuery(snc, sqlString, 0, queryNum, pw, sqlContext, usePlanCaching)
-  }
-
   def assertQuery(snc: SnappyContext, sqlString: String, queryNum: String,
       pw: PrintWriter, sqlContext: SQLContext): Boolean = {
     numRowsValidation = false
-    assertQuery(snc, sqlString, 0, queryNum, pw, sqlContext, true)
-  }
-
-  def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
-      pw: PrintWriter, sqlContext: SQLContext): Boolean = {
-    numRowsValidation = true
-    assertQuery(snc, sqlString, numRows, queryNum, pw, sqlContext, true)
+    assertQuery(snc, sqlString, 0, queryNum, pw, sqlContext)
   }
 
   /*
- Executes the query, matches the result with expected result, returns false if the query
- validation has failed.
- */
+   Executes the query, matches the result with expected result, returns false if the query
+   validation has failed.
+   */
   def assertQuery(snc: SnappyContext, sqlString: String, numRows: Int, queryNum: String,
-      pw: PrintWriter, sqlContext: SQLContext, usePlanCaching: Boolean): Boolean = {
+      pw: PrintWriter, sqlContext: SQLContext): Boolean = {
     var validationFailed = false
     var snappyDF: DataFrame = null
-    if (!usePlanCaching) {
-      snappyDF = snc.sqlUncached(sqlString)
-    } else {
-      snappyDF = snc.sql(sqlString)
-    }
+    snappyDF = snc.sql(sqlString)
     val count = snappyDF.count
     // scalastyle:off println
     pw.println(s"\n${logTime} Executing Query $queryNum ...")
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ao/ConvertToParquet.scala b/dtests/src/test/scala/io/snappydata/hydra/ao/ConvertToParquet.scala
new file mode 100644
index 0000000000..7a13573560
--- /dev/null
+++ b/dtests/src/test/scala/io/snappydata/hydra/ao/ConvertToParquet.scala
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016 SnappyData, Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+package io.snappydata.hydra.ao
+
+import java.io.{File, PrintWriter}
+import java.sql.Timestamp
+
+import scala.util.{Failure, Success, Try}
+
+import com.typesafe.config.Config
+
+import org.apache.spark.sql._
+
+
+object ConvertToParquet extends SnappySQLJob {
+
+  val currDir: String = new java.io.File(".").getCanonicalPath
+  val fileSep: String = File.separator
+  override def runSnappyJob(snSession: SnappySession, jobConfig: Config): Any = {
+    val outputFileName = s"ConvertToParquet_${System.currentTimeMillis()}.out"
+    val pw = new PrintWriter(outputFileName)
+    Try {
+      // scalastyle:off println
+      val snc = snSession.sqlContext
+      val tableName = jobConfig.getString("tableName")
+      // where to store the parquet file and read from csv
+      val dataFileLocation = jobConfig.getString("dataFileLocation")
+      val fromType: String = jobConfig.getString("fromType")
+      writeToParquetAndCsv(tableName, pw, snc, dataFileLocation, fromType)
+    } match {
+      case Success(v) => pw.close()
+        s"See ${currDir}/$outputFileName"
+      case Failure(e) => pw.close();
+        throw e;
+    }
+  }
+
+  def writeToParquetAndCsv(tableName: String, pw: PrintWriter, snc: SQLContext, dataFileLocation:
+  String, fromType: String): Unit = {
+    pw.println("Writing data to parquet and/or csv..")
+    val parquetFileLoc = dataFileLocation  + fileSep + "parquetFiles"
+    val csvFileLoc = dataFileLocation + fileSep + "csvFiles"
+    var df: DataFrame = null
+    if (fromType.equals("csv")) {
+      df = snc.read.format("com.databricks.spark.csv")
+          .option("header", "false")
+          .option("inferSchema", "false")
+          .option("maxCharsPerColumn", "4096")
+          .option("nullValue", "")
+          .csv(s"${dataFileLocation}/${tableName}.csv")
+      df.write.parquet(s"${parquetFileLoc}/${tableName}")
+    } else if (fromType.equals("table")) {
+      df = snc.sql(s"select * from ${tableName}")
+      df.write.parquet(s"${parquetFileLoc}/${tableName}")
+      df.write.csv(s"${csvFileLoc}/$tableName")
+    }
+    pw.flush()
+  }
+
+  /**
+    * Validate if the data files are available, else throw SparkJobInvalid
+    *
+    */
+  override def isValidJob(sc: SnappySession, config: Config): SnappyJobValidation = SnappyJobValid()
+
+}
\ No newline at end of file
diff --git a/dtests/src/test/scala/io/snappydata/hydra/ao/LoadData.scala b/dtests/src/test/scala/io/snappydata/hydra/ao/LoadData.scala
index 02498ecf8c..08e963ef1f 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/ao/LoadData.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/ao/LoadData.scala
@@ -115,6 +115,9 @@ object LoadData extends SnappySQLJob {
     pw.flush()
   }
 
+  /*
+  Generate duplicate data from csv.
+   */
   def generateDupData(tableName: String, pw: PrintWriter, numIter: Int, snc: SQLContext,
   dataLocation: String, csvFileLocation: String, parquetFileLocation: String): Any
   = {
@@ -129,20 +132,24 @@ object LoadData extends SnappySQLJob {
     // generating 10 million records from 2 lac records
     pw.println("Duplicating data to temp table...")
     for (i <- 0 until numIter) {
-      temp_DF.write.format("column").mode(SaveMode.Append).saveAsTable(s"tempTable")
+      temp_DF.write.format("column").mode(SaveMode.Append).saveAsTable(s"${tableName}Temp")
     }
 
     // writing dat to parquet and csv
-    pw.println("creating df with duplicate records and writing it to csv and parquet..")
-    val df = snc.sql("select * from meterReadingstemp")
-    df.write.parquet(s"${parquetFileLocation}/${tableName}")
-    df.write.csv(s"${csvFileLocation}/$tableName")
+    writeToCsvAndParquet(tableName + "Temp", pw, snc, csvFileLocation, parquetFileLocation)
 
     pw.println("Dropping temp table")
     snc.sql(s"drop table if exists ${tableName}Temp")
-    pw.flush()
   }
 
+  def writeToCsvAndParquet(tableName: String, pw: PrintWriter, snc: SQLContext, csvFileLocation:
+  String, parquetFileLocation: String): Unit = {
+    pw.println("creating df with duplicate records and writing it to csv and parquet..")
+    val df = snc.sql(s"select * from ${tableName}")
+    df.write.parquet(s"${parquetFileLocation}/${tableName}")
+    df.write.csv(s"${csvFileLocation}/$tableName")
+    pw.flush()
+  }
   /**
     * Validate if the data files are available, else throw SparkJobInvalid
     *
diff --git a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
index e8f0caffaf..b547208a98 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/northwind/NWTestUtil.scala
@@ -16,8 +16,7 @@
  */
 package io.snappydata.hydra.northwind
 
-import java.io.{File, PrintWriter}
-
+import java.io.PrintWriter
 
 import io.snappydata.hydra.SnappyTestUtils
 
@@ -71,7 +70,6 @@ object NWTestUtil {
   */
   def validateQueries(snc: SnappyContext, tableType: String, pw: PrintWriter, sqlContext:
   SQLContext): String = {
-    val usePlanCaching: Boolean = false
     var failedQueries = ""
     if (SnappyTestUtils.validateFullResultSet) {
       // scalastyle:off println
@@ -177,170 +175,170 @@ object NWTestUtil {
       }
       q._1 match {
         case "Q25" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q25, 1,
-          "Q25", pw, sqlContext, usePlanCaching)
+          "Q25", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q25_1, 1, "Q25_1", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q25_2, 1, "Q25_2", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case "Q26" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q26, 86,
-          "Q26", pw, sqlContext, usePlanCaching)
+          "Q26", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q26_1, 54, "Q26_1", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q26_2, 60, "Q26_2", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case "Q27" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q27, 9, "Q27",
-          pw, sqlContext, usePlanCaching)
+          pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q27_1, 5, "Q27_1", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q27_2, 8, "Q27_2", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q27_3, 3, "Q27_3", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q27_4, 6, "Q27_4", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case "Q28" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q28, 12,
-          "Q28", pw, sqlContext, usePlanCaching)
+          "Q28", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q28_1, 12, "Q28_1",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q28_2, 5, "Q28_2",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
           }; queryExecuted = true
         case "Q29" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q29, 8,
-          "Q29", pw, sqlContext, usePlanCaching)
+          "Q29", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q29_1, 5, "Q29_1", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q29_2, 6, "Q29_2", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case "Q30" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q30, 8, "Q30",
-          pw, sqlContext, usePlanCaching)
+          pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q30_1, 8, "Q30_1", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q30_2, 6, "Q30_2", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case "Q31" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q31, 830,
-          "Q31", pw, sqlContext, usePlanCaching)
+          "Q31", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q31_1, 502, "Q31_1",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q31_2, 286, "Q31_2", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q31_3, 219, "Q31_3", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q31_4, 484, "Q31_4", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case "Q32" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q32, 8, "Q32",
-          pw, sqlContext, usePlanCaching)
+          pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q32_1, 282, "Q32_1", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case "Q33" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q33, 37,
-          "Q33", pw, sqlContext, usePlanCaching)
+          "Q33", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q33_1, 769, "Q33_1",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
           }; queryExecuted = true
         case "Q34" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q34, 5,
-          "Q34", pw, sqlContext, usePlanCaching)
+          "Q34", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q34_1, 1, "Q34_1",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q34_2, 4, "Q34_2",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
           }; queryExecuted = true
         case "Q35" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q35, 3, "Q35",
-          pw, sqlContext, usePlanCaching)
+          pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q35_1, 2, "Q35_1", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q35_2, 3, "Q35_2", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case "Q36" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q36, 290,
-          "Q36", pw, sqlContext, usePlanCaching)
+          "Q36", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q36_1, 232, "Q36_1",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q36_2, 61, "Q36_2", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case "Q38" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q38, 2155,
-          "Q38", pw, sqlContext, usePlanCaching)
+          "Q38", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q38_1, 2080, "Q38_1",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q38_2, 2041, "Q38_2",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
           }; queryExecuted = true
         case "Q40" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q40, 830,
-          "Q40", pw, sqlContext, usePlanCaching)
+          "Q40", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q40_1, 12, "Q40_1", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q40_2, 9, "Q40_2", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case "Q42" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q42, 22,
-          "Q42", pw, sqlContext, usePlanCaching)
+          "Q42", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q42_1, 22, "Q42_1",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q42_2, 7, "Q42_2",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
           }; queryExecuted = true
         case "Q43" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q43, 830,
-          "Q43", pw, sqlContext, usePlanCaching)
+          "Q43", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q43_1, 10, "Q43_1",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q43_2, 2, "Q43_2", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case "Q49" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q49, 1788650,
-          "Q49", pw, sqlContext, usePlanCaching)
+          "Q49", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q49_1, 1713225, "Q49_1",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q49_2, 1741240, "Q49_2",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
           }; queryExecuted = true
         case "Q51" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q51, 2155,
-          "Q51", pw, sqlContext, usePlanCaching)
+          "Q51", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q51_1, 2080, "Q51_1",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q51_2, 2041, "Q51_2",
-              pw, sqlContext, usePlanCaching)
+              pw, sqlContext)
           }; queryExecuted = true
         case "Q55" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q55, 21,
-          "Q55", pw, sqlContext, usePlanCaching)
+          "Q55", pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q55_1, 7, "Q55_1", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q55_2, 6, "Q55_2", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case "Q56" => hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56, 8, "Q56",
-          pw, sqlContext, usePlanCaching)
+          pw, sqlContext)
           if (executeQueriesByChangingConstants) {
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56_1, 8, "Q56_1", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56_2, 8, "Q56_2", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
             hasValidationFailed = SnappyTestUtils.assertJoin(snc, NWQueries.Q56_3, 8, "Q56_3", pw,
-              sqlContext, usePlanCaching)
+              sqlContext)
           }; queryExecuted = true
         case _ => // do nothing
       }
@@ -414,37 +412,37 @@ object NWTestUtil {
         case "Q25" => SnappyTestUtils.assertJoin(snc, NWQueries.Q25, "Q25",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q25_1, "Q25_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q25_2, "Q25_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q26" => SnappyTestUtils.assertJoin(snc, NWQueries.Q26, "Q26",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q26_1, "Q26_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q26_2, "Q26_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q27" => SnappyTestUtils.assertJoin(snc, NWQueries.Q27, "Q27",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q27_1, "Q27_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q27_2, "Q27_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q27_3, "Q27_3",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q27_4, "Q27_4",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q28" => SnappyTestUtils.assertJoin(snc, NWQueries.Q28, "Q28",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q28_1, "Q28_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q28_2, "Q28_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q30" => SnappyTestUtils.assertJoin(snc, NWQueries.Q30, "Q30",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q30_2, "Q30_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q30_2, "Q30_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q61" => SnappyTestUtils.assertQuery(snc, NWQueries.Q61, "Q61",
           pw, sqlContext)
         case "Q62" => SnappyTestUtils.assertQuery(snc, NWQueries.Q62, "Q62",
@@ -452,81 +450,81 @@ object NWTestUtil {
         case "Q31" => SnappyTestUtils.assertJoin(snc, NWQueries.Q31, "Q31",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q31_1, "Q31_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q31_2, "Q31_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q31_3, "Q31_3",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q31_4, "Q31_4",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q32" => SnappyTestUtils.assertJoin(snc, NWQueries.Q32, "Q32",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q32_1, "Q32_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q33" => SnappyTestUtils.assertJoin(snc, NWQueries.Q33, "Q33",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q33_1, "Q33_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q34" => SnappyTestUtils.assertJoin(snc, NWQueries.Q34, "Q34",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q34_1, "Q34_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q34_2, "Q34_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q36" => SnappyTestUtils.assertJoin(snc, NWQueries.Q36, "Q36",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q36_1, "Q36_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q36_2, "Q36_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q37" => SnappyTestUtils.assertJoin(snc, NWQueries.Q37, "Q37",
           pw, sqlContext)
         case "Q38" => SnappyTestUtils.assertJoin(snc, NWQueries.Q38, "Q38",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q38_1, "Q38_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q38_2, "Q38_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q39" => SnappyTestUtils.assertJoin(snc, NWQueries.Q39, "Q39",
           pw, sqlContext)
         case "Q40" => SnappyTestUtils.assertJoin(snc, NWQueries.Q40, "Q40",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q40_1, "Q40_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q40_2, "Q40_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q41" => SnappyTestUtils.assertJoin(snc, NWQueries.Q41, "Q41",
           pw, sqlContext)
         case "Q42" => SnappyTestUtils.assertJoin(snc, NWQueries.Q42, "Q42",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q42_1, "Q42_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q42_2, "Q42_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q43" => SnappyTestUtils.assertJoin(snc, NWQueries.Q43, "Q43",
           pw, sqlContext)
         case "Q51" => SnappyTestUtils.assertJoin(snc, NWQueries.Q51, "Q51",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q51_1, "Q51_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q51_2, "Q51_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q52" => SnappyTestUtils.assertJoin(snc, NWQueries.Q52, "Q52",
           pw, sqlContext)
         case "Q55" => SnappyTestUtils.assertJoin(snc, NWQueries.Q55, "Q55",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q55_1, "Q55_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q55_2, "Q55_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q56" => SnappyTestUtils.assertJoin(snc, NWQueries.Q56, "Q56",
           pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q56_1, "Q56_1",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q56_2, "Q56_2",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
           SnappyTestUtils.assertJoin(snc, NWQueries.Q56_3, "Q56_3",
-            pw, sqlContext, usePlanCaching)
+            pw, sqlContext)
         case "Q58" => SnappyTestUtils.assertQuery(snc, NWQueries.Q58, "Q58",
           pw, sqlContext)
         case "Q59" => SnappyTestUtils.assertQuery(snc, NWQueries.Q59, "Q59",

From 2024c3af32cc8aa223079ddb1cc1e1c564043af0 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@tibco.com>
Date: Mon, 3 Jun 2019 14:26:51 +0530
Subject: [PATCH 27/33] - Some formatting changes.

---
 .../snappydata/hydra/cluster/SnappyTest.java  | 22 ++++++++-----------
 1 file changed, 9 insertions(+), 13 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index 1fb4886a40..7fd69ca4fb 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -2877,7 +2877,7 @@ public static synchronized void HydraTask_checkSuspectStrings() {
   }
 
   public void checkSuspectStrings() {
-    String hasOOMEORJVMCrash = checkForJVMCrashOrOOME();
+    String hasOOMEOrJVMCrash = checkForJVMCrashOrOOME();
     String checkSuspectOutPut = getCurrentDirPath() + File.separator + "suspectStrings.txt";
     File suspectStringFile = new File(checkSuspectOutPut);
     StringBuilder cmd = new StringBuilder();
@@ -2905,9 +2905,6 @@ public void checkSuspectStrings() {
     if(suspectStringFile.length() != 0){
       try {
         StringBuilder exceptionList = new StringBuilder();
-        if(cycleVms){
-
-        }
         BufferedReader reader = new BufferedReader(new FileReader(suspectStringFile));
         String line = "";
         while((line = reader.readLine()) != null)
@@ -2915,13 +2912,13 @@ public void checkSuspectStrings() {
         throw new TestException("Unknown Exceptions observed in the run " + exceptionList
             .toString());
       } catch(FileNotFoundException fe) {
-
+        throw new TestException("Got exception while checking for suspect strings." , fe);
       } catch(IOException ie) {
-
+        throw new TestException("Got exception while checking for suspect strings." , ie);
       }
     }
-    if(hasOOMEORJVMCrash.length()>0){
-      throw new TestException(hasOOMEORJVMCrash);
+    if(hasOOMEOrJVMCrash.length()>0){
+      throw new TestException(hasOOMEOrJVMCrash);
     }
   }
 
@@ -3014,7 +3011,6 @@ public static synchronized void HydraTask_stopSparkCluster() {
       String s = "problem occurred while retriving destination logFile path " + log;
       throw new TestException(s, e);
     }
-
   }
 
   /**
@@ -3194,10 +3190,10 @@ public static synchronized void HydraTask_cycleLeadVM() {
           try {
             if (vmName.equalsIgnoreCase("lead"))
               vms = stopStartVMs(numToKill, "lead", isDmlOp, restart, rebalance);
-            else if (vmName.equalsIgnoreCase("server")) vms = stopStartVMs(numToKill, "server",
-                isDmlOp, restart, rebalance);
-            else if (vmName.equalsIgnoreCase("locator")) vms = stopStartVMs(numToKill,
-                "locator", isDmlOp, restart, rebalance);
+            else if (vmName.equalsIgnoreCase("server"))
+              vms = stopStartVMs(numToKill, "server", isDmlOp, restart, rebalance);
+            else if (vmName.equalsIgnoreCase("locator"))
+              vms = stopStartVMs(numToKill, "locator", isDmlOp, restart, rebalance);
             break;
           } catch (TestException te) {
             throw te;

From 0eda14d358cc42570ef4b49a43d1238f2fb8ddf6 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@tibco.com>
Date: Thu, 6 Jun 2019 15:06:00 +0530
Subject: [PATCH 28/33] - Adding changes for throwing an exception on job
 failure (for end-to-end validation) to SPVA schema tests.

---
 .../snappydata/hydra/spva/SPVATestUtil.scala  | 215 ++++++++++++------
 .../hydra/spva/ValidateSPVAQueriesJob.scala   |  16 +-
 2 files changed, 148 insertions(+), 83 deletions(-)

diff --git a/dtests/src/test/scala/io/snappydata/hydra/spva/SPVATestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/spva/SPVATestUtil.scala
index 021585122e..1595410534 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/spva/SPVATestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/spva/SPVATestUtil.scala
@@ -148,84 +148,153 @@ object SPVATestUtil {
   }
 
   def validateQueriesFullResultSet(snc: SnappyContext, tableType: String, pw: PrintWriter,
-                                   sqlContext: SQLContext): Unit = {
+                                   sqlContext: SQLContext): String = {
+    var failedQueries = ""
+    SnappyTestUtils.tableType = tableType
     for (q <- SPVAQueries.queries) {
+      var queryExecuted = true;
+      var validationFailed = false;
+      if (SnappyTestUtils.validateFullResultSet) {
+        // scalastyle:off println
+        pw.println(s"createAndLoadSparkTables started ...")
+        val startTime = System.currentTimeMillis
+        createAndLoadSparkTables(sqlContext)
+        val finishTime = System.currentTimeMillis()
+        pw.println(s"createAndLoadSparkTables completed successfully in : " + ((finishTime -
+            startTime)/1000) + " seconds")
+      }
       q._1 match {
-        case "Q1_1" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_1, "Q1_1", pw, sqlContext)
-        case "Q1_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_2, "Q1_2", pw, sqlContext)
-        case "Q1_3" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_3, "Q1_3", pw, sqlContext)
-        case "Q1_4" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_4, "Q1_4", pw, sqlContext)
-        case "Q1_5" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_5, "Q1_5", pw, sqlContext)
-        case "Q1_6" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_6, "Q1_6", pw, sqlContext)
-        case "Q1_7" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_7, "Q1_7", pw, sqlContext)
-        case "Q1_8" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_8, "Q1_8", pw, sqlContext)
-        case "Q1_9" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_9, "Q1_9", pw, sqlContext)
-        case "Q2_1" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_1, "Q2_1", pw, sqlContext)
-        case "Q2_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_2, "Q2_2", pw, sqlContext)
-        case "Q2_3" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_3, "Q2_3", pw, sqlContext)
-        case "Q2_4" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_4, "Q2_4", pw, sqlContext)
-        case "Q2_5" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_5, "Q2_5", pw, sqlContext)
-        case "Q2_6" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_6, "Q2_6", pw, sqlContext)
-        case "Q2_7" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_7, "Q2_7", pw, sqlContext)
-        case "Q2_8" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_8, "Q2_8", pw, sqlContext)
-        case "Q2_9" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_9, "Q2_9", pw, sqlContext)
-        case "Q2_10" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_10, "Q2_10", pw, sqlContext)
-        case "Q2_11" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_11, "Q2_11", pw, sqlContext)
-        case "Q2_12" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_12, "Q2_12", pw, sqlContext)
-        case "Q2_13" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_13, "Q2_13", pw, sqlContext)
-        case "Q3_1_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_1_2, "Q3_1_2",
-          pw, sqlContext)
-        case "Q3_2_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_2_2, "Q3_2_2",
-          pw, sqlContext)
-        case "Q3_3" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_3, "Q3_3", pw, sqlContext)
-        case "Q3_4_1" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_4_1, "Q3_4_1",
-          pw, sqlContext)
-        case "Q3_4_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_4_2, "Q3_4_2",
-          pw, sqlContext)
-        case "Q3_4_3" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_4_3, "Q3_4_3",
-          pw, sqlContext)
-        case "Q3_5" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_5, "Q3_5", pw, sqlContext)
-        case "Q3_6" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_6, "Q3_6", pw, sqlContext)
-        case "Q3_7_1" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_7_1, "Q3_7_1",
-          pw, sqlContext)
-        case "Q3_7_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_7_2, "Q3_7_2",
-          pw, sqlContext)
-        case "Q3_7_3" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_7_3, "Q3_7_3",
-          pw, sqlContext)
-        case "Q3_7_4" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_7_4, "Q3_7_4",
-          pw, sqlContext)
-        case "Q3_7_5" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_7_5, "Q3_7_5",
-          pw, sqlContext)
-        case "Q3_8" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_8, "Q3_8", pw, sqlContext)
-        case "Q3_9" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_9, "Q3_9", pw, sqlContext)
-        case "Q3_10" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_10, "Q3_10", pw, sqlContext)
-        case "Q3_11" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_11, "Q3_11", pw, sqlContext)
-        case "Q3_12" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_12, "Q3_12", pw, sqlContext)
-        case "Q3_13" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_13, "Q3_13", pw, sqlContext)
-        case "Q3_14" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_14, "Q3_14", pw, sqlContext)
-        case "Q4_1" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q4_1, "Q4_1", pw, sqlContext)
-        case "Q4_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q4_2, "Q4_2", pw, sqlContext)
-        case "Q5_1_1" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q5_1_1, "Q5_1_1",
-          pw, sqlContext)
-        case "Q5_1_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q5_1_2, "Q5_1_2",
-          pw, sqlContext)
-        case "Q5_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q5_2, "Q5_2", pw, sqlContext)
-        case "Q6_1" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q6_1, "Q6_1", pw, sqlContext)
-        case "Q6_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q6_2, "Q6_2", pw, sqlContext)
-        case "Q7_1" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q7_1, "Q7_1", pw, sqlContext)
-        case "Q7_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q7_2, "Q7_2", pw, sqlContext)
-        case "Q8_1" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q8_1, "Q8_1", pw, sqlContext)
-        case "Q8_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q8_2, "Q8_2", pw, sqlContext)
-        case "Q9_1" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q9_1, "Q9_1", pw, sqlContext)
-        case "Q9_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q9_2, "Q9_2", pw, sqlContext)
-        case "Q10" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q10, "Q10", pw, sqlContext)
-        case "Q11_1" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q11_1, "Q11_1", pw, sqlContext)
-        case "Q11_2" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q11_2, "Q11_2", pw, sqlContext)
-        case "Q12" => SnappyTestUtils.assertQuery(snc, SPVAQueries.Q12, "Q12", pw, sqlContext)
+        case "Q1_1" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_1,
+          "Q1_1", pw, sqlContext)
+        case "Q1_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_2, "Q1_2",
+          pw, sqlContext)
+        case "Q1_3" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_3, "Q1_3",
+          pw, sqlContext)
+        case "Q1_4" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_4, "Q1_4",
+          pw, sqlContext)
+        case "Q1_5" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_5, "Q1_5",
+          pw, sqlContext)
+        case "Q1_6" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_6, "Q1_6",
+          pw, sqlContext)
+        case "Q1_7" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_7, "Q1_7",
+          pw, sqlContext)
+        case "Q1_8" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_8, "Q1_8",
+          pw, sqlContext)
+        case "Q1_9" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q1_9, "Q1_9",
+          pw, sqlContext)
+        case "Q2_1" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_1, "Q2_1",
+          pw, sqlContext)
+        case "Q2_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_2, "Q2_2",
+          pw, sqlContext)
+        case "Q2_3" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_3, "Q2_3",
+          pw, sqlContext)
+        case "Q2_4" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_4, "Q2_4",
+          pw, sqlContext)
+        case "Q2_5" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_5, "Q2_5",
+          pw, sqlContext)
+        case "Q2_6" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_6, "Q2_6",
+          pw, sqlContext)
+        case "Q2_7" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_7, "Q2_7",
+          pw, sqlContext)
+        case "Q2_8" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_8, "Q2_8",
+          pw, sqlContext)
+        case "Q2_9" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_9, "Q2_9",
+          pw, sqlContext)
+        case "Q2_10" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_10,
+          "Q2_10", pw, sqlContext)
+        case "Q2_11" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_11,
+          "Q2_11", pw, sqlContext)
+        case "Q2_12" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_12,
+          "Q2_12", pw, sqlContext)
+        case "Q2_13" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q2_13,
+          "Q2_13", pw, sqlContext)
+        case "Q3_1_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_1_2,
+          "Q3_1_2", pw, sqlContext)
+        case "Q3_2_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_2_2,
+          "Q3_2_2", pw, sqlContext)
+        case "Q3_3" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_3,
+          "Q3_3", pw, sqlContext)
+        case "Q3_4_1" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_4_1,
+          "Q3_4_1", pw, sqlContext)
+        case "Q3_4_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_4_2,
+          "Q3_4_2", pw, sqlContext)
+        case "Q3_4_3" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_4_3,
+          "Q3_4_3", pw, sqlContext)
+        case "Q3_5" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_5, "Q3_5",
+          pw, sqlContext)
+        case "Q3_6" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_6, "Q3_6",
+          pw, sqlContext)
+        case "Q3_7_1" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_7_1,
+          "Q3_7_1", pw, sqlContext)
+        case "Q3_7_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_7_2,
+          "Q3_7_2", pw, sqlContext)
+        case "Q3_7_3" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_7_3,
+          "Q3_7_3", pw, sqlContext)
+        case "Q3_7_4" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_7_4,
+          "Q3_7_4", pw, sqlContext)
+        case "Q3_7_5" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_7_5,
+          "Q3_7_5", pw, sqlContext)
+        case "Q3_8" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_8, "Q3_8",
+          pw, sqlContext)
+        case "Q3_9" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_9, "Q3_9",
+          pw, sqlContext)
+        case "Q3_10" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_10,
+          "Q3_10", pw, sqlContext)
+        case "Q3_11" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_11,
+          "Q3_11", pw, sqlContext)
+        case "Q3_12" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_12,
+          "Q3_12", pw, sqlContext)
+        case "Q3_13" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_13,
+          "Q3_13", pw, sqlContext)
+        case "Q3_14" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q3_14,
+          "Q3_14", pw, sqlContext)
+        case "Q4_1" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q4_1, "Q4_1",
+          pw, sqlContext)
+        case "Q4_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q4_2, "Q4_2",
+          pw, sqlContext)
+        case "Q5_1_1" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q5_1_1,
+          "Q5_1_1", pw, sqlContext)
+        case "Q5_1_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q5_1_2,
+          "Q5_1_2", pw, sqlContext)
+        case "Q5_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q5_2, "Q5_2",
+          pw, sqlContext)
+        case "Q6_1" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q6_1, "Q6_1",
+          pw, sqlContext)
+        case "Q6_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q6_2, "Q6_2",
+          pw, sqlContext)
+        case "Q7_1" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q7_1, "Q7_1",
+          pw, sqlContext)
+        case "Q7_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q7_2, "Q7_2",
+          pw, sqlContext)
+        case "Q8_1" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q8_1, "Q8_1",
+          pw, sqlContext)
+        case "Q8_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q8_2, "Q8_2",
+          pw, sqlContext)
+        case "Q9_1" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q9_1, "Q9_1",
+          pw, sqlContext)
+        case "Q9_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q9_2, "Q9_2",
+          pw, sqlContext)
+        case "Q10" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q10, "Q10",
+          pw, sqlContext)
+        case "Q11_1" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q11_1,
+          "Q11_1", pw, sqlContext)
+        case "Q11_2" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q11_2,
+          "Q11_2", pw, sqlContext)
+        case "Q12" => validationFailed = SnappyTestUtils.assertQuery(snc, SPVAQueries.Q12, "Q12",
+          pw, sqlContext)
         // scalastyle:off println
-        case _ => println("OK")
+        case _ =>   // scalastyle:off println
+          pw.println(s"Query ${q._1} will not be executed.")
+          queryExecuted = false
+      }
+      if (queryExecuted) {
+        pw.println(s"Execution completed for query ${q._1}")
+      }
+      if (validationFailed) {
+        failedQueries = SnappyTestUtils.addToFailedQueryList(failedQueries, q._1)
       }
     }
+    return failedQueries;
   }
 
   def dropTables(snc: SnappyContext): Unit = {
diff --git a/dtests/src/test/scala/io/snappydata/hydra/spva/ValidateSPVAQueriesJob.scala b/dtests/src/test/scala/io/snappydata/hydra/spva/ValidateSPVAQueriesJob.scala
index 37202a16b9..66bdcbb123 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/spva/ValidateSPVAQueriesJob.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/spva/ValidateSPVAQueriesJob.scala
@@ -19,10 +19,10 @@ package io.snappydata.hydra.spva
 import java.io.{File, FileOutputStream, PrintWriter}
 
 import com.typesafe.config.Config
-import io.snappydata.hydra.spva
+import io.snappydata.hydra.{SnappyTestUtils, spva}
+
 import org.apache.spark.SparkContext
 import org.apache.spark.sql._
-
 import scala.util.{Failure, Success, Try}
 
 
@@ -31,24 +31,20 @@ object ValidateSPVAQueriesJob extends SnappySQLJob {
     val snc = snappySession.sqlContext
 
     def getCurrentDirectory = new java.io.File(".").getCanonicalPath
-
+    // scalastyle:off println
     val tableType = jobConfig.getString("tableType")
     val outputFile = "ValidateSPVAQueries_" + tableType + "_" + jobConfig.getString("logFileName")
     val pw = new PrintWriter(new FileOutputStream(new File(outputFile), true));
     val sc = SparkContext.getOrCreate()
     val sqlContext = SQLContext.getOrCreate(sc)
+    SnappyTestUtils.validateFullResultSet = true
+    SnappyTestUtils.numRowsValidation = false
     Try {
       val dataFilesLocation = jobConfig.getString("dataFilesLocation")
       snc.setConf("dataFilesLocation", dataFilesLocation)
       spva.SPVAQueries.snc = snc
       SPVAQueries.dataFilesLocation = dataFilesLocation
-      pw.println(s"createAndLoadSparkTables Test started at : " + System.currentTimeMillis)
-      // sqlContext.sql("CREATE SCHEMA IF NOT EXISTS SPD")
-      SPVATestUtil.createAndLoadSparkTables(sqlContext)
-      println(s"createAndLoadSparkTables Test completed successfully at : " + System
-          .currentTimeMillis)
-      pw.println(s"createAndLoadSparkTables Test completed successfully at : " + System
-          .currentTimeMillis)
+
       pw.println(s"ValidateQueriesFullResultSet for ${tableType} tables Queries Test started at" +
           s" :  " + System.currentTimeMillis)
       SPVATestUtil.validateQueriesFullResultSet(snc, tableType, pw, sqlContext)

From bd2f336e448e0a3e32efb7b58c3fc81e7344721d Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@tibco.com>
Date: Fri, 30 Aug 2019 12:40:51 +0530
Subject: [PATCH 29/33] - Missed changes while resolving conflict.

---
 .../java/io/snappydata/hydra/cluster/SnappyTest.java   | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index ab1bdffcd0..2484f5f0d1 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -2922,6 +2922,16 @@ public String checkForJVMCrashOrOOME() {
     return msg.toString();
   }
 
+  /**
+   * Create and start snappy locator using snappy-locators.sh script.
+   */
+  public static synchronized void HydraTask_createAndStartSnappyLocator() {
+    int num = (int) SnappyBB.getBB().getSharedCounters().incrementAndRead(SnappyBB.locatorsStarted);
+    if (num == 1) {
+      snappyTest.startSnappyLocator();
+    }
+  }
+
   /**
    * Create and start snappy server.
    */

From 40ffba5bb9b0c57b8dea0f0fc4e15d8dcb587a63 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@tibco.com>
Date: Fri, 6 Sep 2019 13:40:17 +0530
Subject: [PATCH 30/33] - Some changes in the validation for dataframes, using
 dataframe APIs instead of file comparison.

---
 .../snappydata/hydra/cluster/SnappyPrms.java  | 16 +++---
 .../snappydata/hydra/cluster/SnappyTest.java  | 13 ++---
 .../io/snappydata/hydra/SnappyTestUtils.scala | 50 ++++++++++++-------
 3 files changed, 43 insertions(+), 36 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
index 9f89c6ea30..3756216954 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyPrms.java
@@ -18,6 +18,8 @@
 
 import org.apache.commons.lang.StringUtils;
 
+import java.util.Collections;
+import java.util.List;
 import java.util.Vector;
 
 import hydra.BasePrms;
@@ -741,13 +743,10 @@ public static String getNewLogLevel(){
    */
   public static Long expectedExceptions;
 
-  public static String[] getExpectedExceptionList() {
+  public static List<String> getExpectedExceptionList() {
     Long key = expectedExceptions;
     Vector statements = tab().vecAt(key, new HydraVector());
-    String[] strArr = new String[statements.size()];
-    for (int i = 0; i < statements.size(); i++) {
-      strArr[i] = (String)statements.elementAt(i);
-    }
+    List<String> strArr = Collections.list(statements.elements());
     return strArr;
   }
 
@@ -757,13 +756,10 @@ public static String[] getExpectedExceptionList() {
    */
   public static Long expectedExceptionsForHA;
 
-  public static String[] getExpectedExceptionListForHA() {
+  public static List<String> getExpectedExceptionListForHA() {
     Long key = expectedExceptionsForHA;
     Vector statements = tab().vecAt(key, new HydraVector());
-    String[] strArr = new String[statements.size()];
-    for (int i = 0; i < statements.size(); i++) {
-      strArr[i] = (String)statements.elementAt(i);
-    }
+    List<String> strArr = Collections.list(statements.elements());
     return strArr;
   }
 
diff --git a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
index 2484f5f0d1..6bf28141e3 100644
--- a/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/cluster/SnappyTest.java
@@ -67,8 +67,7 @@ public class SnappyTest implements Serializable {
   protected static SnappyTest snappyTest;
   protected static HostDescription hd = TestConfig.getInstance().getMasterDescription()
       .getVmDescription().getHostDescription();
-  /*protected static HostDescription localhd = TestConfig.getInstance().getClientDescription
-      (RemoteTestModule.getMyClientName()).getVmDescription().getHostDescription();*/
+
   protected static char sep = hd.getFileSep();
   private static String gemfireHome = hd.getGemFireHome() + sep;
   protected static String productDir = gemfireHome + ".." + sep + "snappy" + sep;
@@ -2857,14 +2856,12 @@ public void checkSuspectStrings() {
     StringBuilder cmd = new StringBuilder();
     StringBuilder exceptedExcep = new StringBuilder();
 
-    String[] expectedExceptions = SnappyPrms.getExpectedExceptionList();
+    List<String> expectedExceptions = SnappyPrms.getExpectedExceptionList();
     if(cycleVms) {
-      List<String> exceptions = new ArrayList<String>(Arrays.asList(expectedExceptions));
-      exceptions.addAll(Arrays.asList(SnappyPrms.getExpectedExceptionListForHA()));
-      expectedExceptions = exceptions.toArray(expectedExceptions);
+      expectedExceptions.addAll(SnappyPrms.getExpectedExceptionListForHA());
     }
-    for (int i = 0; i < expectedExceptions.length; i++)
-      exceptedExcep.append(" | grep -v \"").append(expectedExceptions[i] + "\"");
+    for (int i = 0; i < expectedExceptions.size(); i++)
+      exceptedExcep.append(" | grep -v \"").append(expectedExceptions.get(i) + "\"");
 
     cmd.setLength(0);
     cmd.append("find " + getCurrentDirPath() + " -type f \\( -name \"*.log\" -not -iname " +
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index 7fbdf589ce..708f3fd4c8 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -83,25 +83,30 @@ object SnappyTestUtils {
     var validationFailed = false
     var snappyDF: DataFrame = null
     snappyDF = snc.sql(sqlString)
-    val count = snappyDF.count
+    val snappyDFCount = snappyDF.count
     // scalastyle:off println
     pw.println(s"\n${logTime} Executing Query $queryNum ...")
     println(s"Query $queryNum")
     snappyDF.explain(true)
     if (numRowsValidation) {
-      pw.println(s"${logTime} Query ${queryNum} returned ${count} rows for ${tableType} table")
-      if (count != numRows) {
-        pw.println(s"${logTime} Result mismatch for query ${queryNum}: found ${count} rows but " +
-            s"expected ${numRows} rows.")
+      pw.println(s"${logTime} Query ${queryNum} returned ${snappyDFCount} rows for ${tableType}")
+      if (snappyDFCount != numRows) {
+        pw.println(s"${logTime} Result mismatch for query ${queryNum} found ${snappyDFCount} rows" +
+            s"but expected ${numRows} rows.")
         validationFailed = true
       }
       pw.flush()
     }
     var fullRSValidationFailed: Boolean = false
     if (validateFullResultSet) {
-      var sparkDF = sqlContext.sql(sqlString)
+      val sparkDF = sqlContext.sql(sqlString)
+      val sparkDFCount = sparkDF.count()
+      if(snappyDFCount != sparkDFCount) {
+        pw.println(s"Count difference observed in snappy and spark resultset for query " +
+            s"${queryNum}. Snappy returned ${snappyDFCount} and spark returned ${sparkDFCount}.")
+        fullRSValidationFailed = true
+      }
       fullRSValidationFailed = assertQuery(snc, snappyDF, sparkDF, queryNum, pw)
-
     }
     if (validationFailed) {
       pw.println(s"\n${logTime} NumRows validation failed for query ${queryNum} on ${tableType} " +
@@ -132,26 +137,37 @@ object SnappyTestUtils {
         sparkQueryFileName
     // pw.println(s"Spark query results are at : ${sparkDest}")
     val sparkFile: File = new java.io.File(sparkDest)
-
     try {
       if (!snappyFile.exists()) {
         // val snap_col1 = snappyDF.schema.fieldNames(0)
         // val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
-        val snappyDF1 = snappyDF.repartition(1) // .sortWithinPartitions(snap_col1, snap_col: _*)
-        writeToFile(snappyDF, snappyDest, snc)
-        // writeResultSetToCsv(snappyDF, snappyFile)
+        // snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
+        writeToFile(snappyDF.repartition(1), snappyDest, snc)
         pw.println(s"${logTime} Snappy result collected in : ${snappyDest}")
       }
       if (!sparkFile.exists()) {
         // val col1 = sparkDF.schema.fieldNames(0)
         // val col = sparkDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
-        val sparkDF1 = sparkDF.repartition(1) // .sortWithinPartitions(col1, col: _*)
-        writeToFile(sparkDF, sparkDest, snc)
-        // writeResultSetToCsv(sparkDF, sparkFile)
+        // sparkDF.repartition(1).sortWithinPartitions(col1, col: _*)
+        writeToFile(sparkDF.repartition(1), sparkDest, snc)
         pw.println(s"${logTime} Spark result collected in : ${sparkDest}")
       }
-      fullRSValidationFailed = compareFiles(snappyFile, sparkFile, pw, queryNum,
-        fullRSValidationFailed)
+      val missingDF = sparkDF.except(snappyDF).collectAsList()
+      val unexpectedDF = snappyDF.except(sparkDF).collectAsList()
+      if(missingDF.size() > 0 || unexpectedDF.size() > 0) {
+        fullRSValidationFailed = true
+        pw.println("Found mismatch in resultset")
+        if(missingDF.size() > 0) {
+          pw.println(s"The following ${missingDF.size} rows were missing in snappyDF:\n " +
+              missingDF.forEach(println))
+        }
+        if(unexpectedDF.size() > 0) {
+          pw.println(s"The following ${unexpectedDF.size} rows were unexpected in snappyDF:\n" +
+              missingDF.forEach(println))
+        }
+      }
+      // fullRSValidationFailed
+      //    = compareFiles(snappyFile, sparkFile, pw, queryNum, fullRSValidationFailed)
     } catch {
       case ex: Exception => {
         fullRSValidationFailed = true
@@ -486,7 +502,6 @@ object SnappyTestUtils {
         val snap_col = snappyDF.schema.fieldNames.filter(!_.equals(snap_col1)).toSeq
         snappyDF = snappyDF.repartition(1).sortWithinPartitions(snap_col1, snap_col: _*)
         writeToFile(snappyDF, snappyDest, snc)
-        // writeResultSetToCsv(snappyDF, snappyFile)
         pw.println(s"${logTime} ${queryNum} Result Collected in file $snappyDest")
       }
       if (!goldenFile.exists()) {
@@ -501,7 +516,6 @@ object SnappyTestUtils {
         val col = goldenDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
         goldenDF = goldenDF.repartition(1).sortWithinPartitions(col1, col: _*)
         writeToFile(goldenDF, sortedGoldenDest, snc)
-        // writeResultSetToCsv(goldenDF, sortedGoldenFile)
         pw.println(s"${logTime} ${queryNum} Result Collected in file ${sortedGoldenDest}")
       } else {
         pw.println(s"${logTime} No results in query result file for $queryNum.")

From 6a12a57aa4ec6b56bf12a7f06fb2d86583852df0 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@tibco.com>
Date: Fri, 6 Sep 2019 15:03:20 +0530
Subject: [PATCH 31/33] - Fixing compilation errors.

---
 .../scala/io/snappydata/hydra/SnappyTestUtils.scala  | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index 708f3fd4c8..ab0d3eeff5 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -158,12 +158,14 @@ object SnappyTestUtils {
         fullRSValidationFailed = true
         pw.println("Found mismatch in resultset")
         if(missingDF.size() > 0) {
-          pw.println(s"The following ${missingDF.size} rows were missing in snappyDF:\n " +
-              missingDF.forEach(println))
+          pw.println(s"The following ${missingDF.size} rows were missing in snappyDF:\n ")
+          for(i <- 0 until missingDF.size())
+            pw.println(missingDF.get(i))
         }
         if(unexpectedDF.size() > 0) {
-          pw.println(s"The following ${unexpectedDF.size} rows were unexpected in snappyDF:\n" +
-              missingDF.forEach(println))
+          pw.println(s"The following ${unexpectedDF.size} rows were unexpected in snappyDF:\n")
+          for(i <- 0 until unexpectedDF.size())
+            pw.println(unexpectedDF.get(i))
         }
       }
       // fullRSValidationFailed
@@ -441,7 +443,7 @@ object SnappyTestUtils {
     }
 
     // check if the mismatch is due to decimal, and can be ignored
-    if ((missing.size() > 0) && missing.size() == unexpected.size()) {
+    if ((missing.size() > 0) && (missing.size() == unexpected.size())) {
       Collections.sort(missing)
       Collections.sort(unexpected)
       for (i <- 0 until missing.size()) {

From 609714d60ddaa098f7dafa273f87fd78bf65195e Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@tibco.com>
Date: Fri, 27 Sep 2019 14:36:26 +0530
Subject: [PATCH 32/33] - Changing the way we compare dataframes, use dataframe
 apis.

---
 .../adAnalytics/SnappyAdAnalyticsTest.java    | 11 ++-
 .../hydra/testDMLOps/SnappyDMLOpsUtil.java    |  4 --
 .../io/snappydata/hydra/SnappyTestUtils.scala | 72 ++++++++++++-------
 .../snappydata/hydra/spva/SPVATestUtil.scala  | 26 ++-----
 4 files changed, 58 insertions(+), 55 deletions(-)

diff --git a/dtests/src/test/java/io/snappydata/hydra/adAnalytics/SnappyAdAnalyticsTest.java b/dtests/src/test/java/io/snappydata/hydra/adAnalytics/SnappyAdAnalyticsTest.java
index fb22871ac8..8d9d456d69 100644
--- a/dtests/src/test/java/io/snappydata/hydra/adAnalytics/SnappyAdAnalyticsTest.java
+++ b/dtests/src/test/java/io/snappydata/hydra/adAnalytics/SnappyAdAnalyticsTest.java
@@ -375,15 +375,14 @@ protected void executeSnappyStreamingJob(Vector jobClassNames, String logFileNam
         Log.getLogWriter().info("JobID is : " + jobID);
         SnappyBB.getBB().getSharedMap().put(appName, jobID);
         for (int j = 0; j < 3; j++) {
+          if (!getJobStatus(jobID)) {
+            throw new TestException("Got Exception while executing streaming job. Please check " +
+                "the job status output.");
+          }
           try {
             Thread.sleep(10 * 1000);
           } catch (InterruptedException ie) {
           }
-          getJobStatus(jobID);
-        }
-        if(!checkJobStatus(jobID)){
-          throw new TestException("Got Exception while executing streaming job. Please check " +
-              "the job status output.");
         }
       }
     }
@@ -596,7 +595,7 @@ public boolean getJobStatus(String jobID){
             return false;
           break;
         }
-      } try { Thread.sleep(10*1000);} catch(InterruptedException ie) { }
+      }
     } catch (IOException ie){
       Log.getLogWriter().info("Got exception while accessing current dir");
     }
diff --git a/dtests/src/test/java/io/snappydata/hydra/testDMLOps/SnappyDMLOpsUtil.java b/dtests/src/test/java/io/snappydata/hydra/testDMLOps/SnappyDMLOpsUtil.java
index d6dc9db667..e3215e4ec7 100644
--- a/dtests/src/test/java/io/snappydata/hydra/testDMLOps/SnappyDMLOpsUtil.java
+++ b/dtests/src/test/java/io/snappydata/hydra/testDMLOps/SnappyDMLOpsUtil.java
@@ -970,8 +970,6 @@ public void performUpdate() {
         if (stmt.toUpperCase().contains("SELECT"))
           getAndExecuteSelect(dConn,stmt,true);
         Log.getLogWriter().info("Executing " + stmt + " on derby.");
-        if (stmt.toUpperCase().contains("SELECT"))
-          getAndExecuteSelect(dConn, stmt, true);
         int derbyRows = dConn.createStatement().executeUpdate(stmt);
         Log.getLogWriter().info("Updated " + derbyRows + " rows in derby.");
         if (numRows != derbyRows) {
@@ -1021,8 +1019,6 @@ public void performDelete() {
         if (stmt.toUpperCase().contains("SELECT"))
           getAndExecuteSelect(dConn,stmt,true);
         Log.getLogWriter().info("Executing " + stmt + " on derby.");
-        if (stmt.toUpperCase().contains("SELECT"))
-          getAndExecuteSelect(dConn, stmt, true);
         int derbyRows = dConn.createStatement().executeUpdate(stmt);
         Log.getLogWriter().info("Deleted " + derbyRows + " rows in derby.");
         if (numRows != derbyRows) {
diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index ab0d3eeff5..3ee5d23bbe 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -82,7 +82,7 @@ object SnappyTestUtils {
       pw: PrintWriter, sqlContext: SQLContext): Boolean = {
     var validationFailed = false
     var snappyDF: DataFrame = null
-    snappyDF = snc.sql(sqlString)
+    snappyDF = snc.sql(sqlString).cache()
     val snappyDFCount = snappyDF.count
     // scalastyle:off println
     pw.println(s"\n${logTime} Executing Query $queryNum ...")
@@ -99,7 +99,7 @@ object SnappyTestUtils {
     }
     var fullRSValidationFailed: Boolean = false
     if (validateFullResultSet) {
-      val sparkDF = sqlContext.sql(sqlString)
+      val sparkDF = sqlContext.sql(sqlString).cache()
       val sparkDFCount = sparkDF.count()
       if(snappyDFCount != sparkDFCount) {
         pw.println(s"Count difference observed in snappy and spark resultset for query " +
@@ -125,18 +125,16 @@ object SnappyTestUtils {
   def assertQuery(snc: SnappyContext, snappyDF: DataFrame, sparkDF: DataFrame, queryNum: String,
       pw: PrintWriter): Boolean = {
     var fullRSValidationFailed = false
-    val snappyQueryFileName = s"Snappy_${queryNum}"
-    val snappyDest: String = getQueryResultDir("snappyResults") +
-        File.separator + snappyQueryFileName
+
+    val snappyResFileName = s"Snappy_${queryNum}"
+    val snappyDest: String = getQueryResultDir("snappyResults") + File.separator + snappyResFileName
     // scalastyle:off println
-    // pw.println(s"Snappy query results are at : ${snappyDest}")
     val snappyFile: File = new java.io.File(snappyDest)
 
-    val sparkQueryFileName = s"Spark_${queryNum}"
-    val sparkDest: String = getQueryResultDir("sparkResults") + File.separator +
-        sparkQueryFileName
-    // pw.println(s"Spark query results are at : ${sparkDest}")
+    val sparkResFileName = s"Spark_${queryNum}"
+    val sparkDest: String = getQueryResultDir("sparkResults") + File.separator + sparkResFileName
     val sparkFile: File = new java.io.File(sparkDest)
+
     try {
       if (!snappyFile.exists()) {
         // val snap_col1 = snappyDF.schema.fieldNames(0)
@@ -145,6 +143,7 @@ object SnappyTestUtils {
         writeToFile(snappyDF.repartition((1)), snappyDest, snc)
         pw.println(s"${logTime} Snappy result collected in : ${snappyDest}")
       }
+
       if (!sparkFile.exists()) {
         // val col1 = sparkDF.schema.fieldNames(0)
         // val col = sparkDF.schema.fieldNames.filter(!_.equals(col1)).toSeq
@@ -152,20 +151,44 @@ object SnappyTestUtils {
         writeToFile(sparkDF.repartition(1), sparkDest, snc)
         pw.println(s"${logTime} Spark result collected in : ${sparkDest}")
       }
-      val missingDF = sparkDF.except(snappyDF).collectAsList()
-      val unexpectedDF = snappyDF.except(sparkDF).collectAsList()
-      if(missingDF.size() > 0 || unexpectedDF.size() > 0) {
-        fullRSValidationFailed = true
-        pw.println("Found mismatch in resultset")
-        if(missingDF.size() > 0) {
-          pw.println(s"The following ${missingDF.size} rows were missing in snappyDF:\n ")
-          for(i <- 0 to missingDF.size())
-            pw.println(missingDF.get(i))
+      val expectedFile = sparkFile.listFiles.filter(_.getName.endsWith(".csv"))
+      val sparkDF2 = snc.read.format("com.databricks.spark.csv")
+          .option("header", "false")
+          .option("inferSchema", "false")
+          .option("nullValue", "NULL")
+          .option("maxCharsPerColumn", "4096")
+          .load(s"${expectedFile}")
+
+      val missingDF: Array[Row] = sparkDF2.except(snappyDF).sort(sparkDF2.columns(0)).collect()
+      val unexpectedDF: Array[Row] = snappyDF.except(sparkDF2).sort(sparkDF2.columns(0)).collect()
+
+      val aStr = new StringBuilder
+      if(missingDF.length > 0 || unexpectedDF.length > 0) {
+        pw.println(s"Found mismatch in resultset for query ${queryNum}... ")
+        if(missingDF.length > 0) {
+          aStr.append(s"The following ${missingDF.size} rows were missing in snappyDF:\n ")
+          for(i <- 0 until missingDF.size)
+            aStr.append(missingDF(i) + "\n")
         }
-        if(unexpectedDF.size() > 0) {
-          pw.println(s"The following ${unexpectedDF.size} rows were unexpected in snappyDF:\n")
-          for(i <- 0 to unexpectedDF.size())
-            pw.println(unexpectedDF.get(i))
+        if(unexpectedDF.length > 0) {
+          aStr.append(s"The following ${unexpectedDF.size} rows were unexpected in snappyDF:\n")
+          for(i <- 0 until unexpectedDF.size)
+            aStr.append(unexpectedDF(i) + "\n")
+        }
+
+        // check if the mismatch is due to decimal, and can be ignored
+        if (unexpectedDF.length == missingDF.length) {
+          for (i <- 0 until missingDF.size) {
+            if (!isIgnorable(missingDF(i).toString, unexpectedDF(i).toString)) {
+              fullRSValidationFailed = true
+            }
+          }
+          pw.println("This mismatch can be ignored.")
+          aStr.setLength(0) // data mismatch can be ignored
+        }
+        if(aStr.length > 0) {
+          pw.println(aStr)
+          fullRSValidationFailed = true
         }
       }
       // fullRSValidationFailed
@@ -182,7 +205,6 @@ object SnappyTestUtils {
     fullRSValidationFailed
   }
 
-
   def dataTypeConverter(row: Row): Row = {
     val md = row.toSeq.map {
       // case d: Double => "%18.1f".format(d).trim().toDouble
@@ -216,7 +238,7 @@ object SnappyTestUtils {
         })
         sb.toString()
       }).write.format("org.apache.spark.sql.execution.datasources.csv.CSVFileFormat").option(
-      "header", false).save(dest)
+      "header", true).save(dest)
   }
 
   /*
diff --git a/dtests/src/test/scala/io/snappydata/hydra/spva/SPVATestUtil.scala b/dtests/src/test/scala/io/snappydata/hydra/spva/SPVATestUtil.scala
index 7a012c9188..2af57e23a4 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/spva/SPVATestUtil.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/spva/SPVATestUtil.scala
@@ -29,34 +29,26 @@ object SPVATestUtil {
   def createAndLoadReplicatedTables(snc: SnappyContext): Unit = {
 
     snc.sql(SPVAQueries.patients_table)
-    SPVAQueries.patients(snc).write.insertInto("patient")
 
     snc.sql(SPVAQueries.encounters_table)
-    SPVAQueries.encounters(snc).write.insertInto("encounters")
 
     snc.sql(SPVAQueries.allergies_table)
-    SPVAQueries.allergies(snc).write.insertInto("allergies")
 
     snc.sql(SPVAQueries.careplans_table)
-    SPVAQueries.careplans(snc).write.insertInto("careplans")
 
     snc.sql(SPVAQueries.conditions_table)
-    SPVAQueries.conditions(snc).write.insertInto("conditions")
 
     snc.sql(SPVAQueries.imaging_studies_table)
-    SPVAQueries.imaging_studies(snc).write.insertInto("imaging_studies")
 
     snc.sql(SPVAQueries.immunizations_table)
-    SPVAQueries.immunizations(snc).write.insertInto("immunizations")
 
     snc.sql(SPVAQueries.medications_table)
-    SPVAQueries.medications(snc).write.insertInto("medications")
 
     snc.sql(SPVAQueries.observations_table)
-    SPVAQueries.observations(snc).write.insertInto("observations")
 
     snc.sql(SPVAQueries.procedures_table)
-    SPVAQueries.procedures(snc).write.insertInto("procedures")
+
+    loadTables(snc)
   }
 
   def createAndLoadPartitionedTables(snc: SnappyContext): Unit = {
@@ -91,16 +83,7 @@ object SPVATestUtil {
         " colocate_with 'PATIENTS', buckets '12', redundancy '1', PERSISTENT 'sync', " +
         " EVICTION_BY 'LRUHEAPPERCENT')")
 
-    SPVAQueries.patients(snc).write.insertInto("patients")
-    SPVAQueries.encounters(snc).write.insertInto("encounters")
-    SPVAQueries.allergies(snc).write.insertInto("allergies")
-    SPVAQueries.careplans(snc).write.insertInto("careplans")
-    SPVAQueries.conditions(snc).write.insertInto("conditions")
-    SPVAQueries.imaging_studies(snc).write.insertInto("imaging_studies")
-    SPVAQueries.immunizations(snc).write.insertInto("immunizations")
-    SPVAQueries.medications(snc).write.insertInto("medications")
-    SPVAQueries.observations(snc).write.insertInto("observations")
-    SPVAQueries.procedures(snc).write.insertInto("procedures")
+    loadTables(snc)
   }
 
   def createAndLoadColumnTables(snc: SnappyContext): Unit = {
@@ -134,7 +117,10 @@ object SPVATestUtil {
     snc.sql(SPVAQueries.procedures_table + " using column options(PARTITION_BY 'PATIENT', " +
         " colocate_with 'PATIENTS', buckets '12', redundancy '1', PERSISTENT 'sync', " +
         " EVICTION_BY 'LRUHEAPPERCENT')")
+    loadTables(snc)
+  }
 
+  def loadTables(snc: SnappyContext): Unit = {
     SPVAQueries.patients(snc).write.insertInto("patients")
     SPVAQueries.encounters(snc).write.insertInto("encounters")
     SPVAQueries.allergies(snc).write.insertInto("allergies")

From 8b178eba1adb06b6df2450073a9c010a86c293f5 Mon Sep 17 00:00:00 2001
From: sonal <sagarwal@tibco.com>
Date: Thu, 10 Oct 2019 12:11:48 +0530
Subject: [PATCH 33/33] - Minor fix

---
 dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
index 3ee5d23bbe..9c4e1646f3 100644
--- a/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
+++ b/dtests/src/test/scala/io/snappydata/hydra/SnappyTestUtils.scala
@@ -157,7 +157,7 @@ object SnappyTestUtils {
           .option("inferSchema", "false")
           .option("nullValue", "NULL")
           .option("maxCharsPerColumn", "4096")
-          .load(s"${expectedFile}")
+          .load(s"${expectedFile.iterator.next().getAbsolutePath}")
 
       val missingDF: Array[Row] = sparkDF2.except(snappyDF).sort(sparkDF2.columns(0)).collect()
       val unexpectedDF: Array[Row] = snappyDF.except(sparkDF2).sort(sparkDF2.columns(0)).collect()