Merged changes from downstream, test tweaks
Marek Paterczyk committed Jul 28, 2017
1 parent 9fa383d commit ec41269
Showing 3 changed files with 46 additions and 32 deletions.
@@ -211,18 +211,7 @@ public void update(CRUDOperationContext ctx,
measure.begin("ctx.addDocument");
DocTranslator.TranslatedDoc translatedDoc=translator.toJson(document);
DocCtx doc=new DocCtx(translatedDoc.doc,translatedDoc.rmd);
resultDocs.add(doc);
doc.startModifications();
if (memoryMonitor != null) {
// if memory threshold is exceeded, this will throw an Error
memoryMonitor.apply(doc);
// an Error means *inconsistent update operation*:
// some batches will be updated, some won't
// no hooks will fire for updated batches
// counts sent to client will be set to zero
// TODO: I perceive this as a problem with updates and hooks impl in general
// we need to run hooks per batch (see https://github.com/lightblue-platform/lightblue-mongo/issues/378)
}
measure.end("ctx.addDocument");
// From now on: doc contains the working copy, and doc.originalDoc contains the original copy
if (updateDoc(md,doc,measure)) {
@@ -269,13 +258,23 @@ public void update(CRUDOperationContext ctx,
int di=0;
// Only add the docs that were not lost
for(DocCtx d:docUpdateAttempts) {
if(!ci.lostDocs.contains(di))
if(!ci.lostDocs.contains(di)) {
enforceMemoryLimit(d);
resultDocs.add(d);
}
di++;
}
}
doc.setCRUDOperationPerformed(CRUDOperation.UPDATE);
doc.setUpdatedDocument(doc);
} catch (Error e) {
if (MongoCrudConstants.ERROR_RESULT_SIZE_TOO_LARGE.equals(e.getErrorCode())) {
throw e;
} else {
LOGGER.warn("Update exception for document {}: {}", docIndex, e);
doc.addError(Error.get(MongoCrudConstants.ERR_UPDATE_ERROR, e.toString()));
hasErrors = true;
}
} catch (Exception e) {
LOGGER.warn("Update exception for document {}: {}", docIndex, e);
doc.addError(Error.get(MongoCrudConstants.ERR_UPDATE_ERROR, e.toString()));
@@ -307,8 +306,10 @@ public void update(CRUDOperationContext ctx,
numUpdated+=docUpdateAttempts.size()-batchStartIndex-ci.errors.size()-ci.lostDocs.size();
int di=0;
for(DocCtx d:docUpdateAttempts) {
if(!ci.lostDocs.contains(di))
if(!ci.lostDocs.contains(di)) {
enforceMemoryLimit(d);
resultDocs.add(d);
}
di++;
}
}
@@ -326,6 +327,19 @@ public void update(CRUDOperationContext ctx,
METRICS.debug("IterateAndUpdate:\n{}",measure);
}

private void enforceMemoryLimit(DocCtx doc) {
if (memoryMonitor != null) {
// if memory threshold is exceeded, this will throw an Error
memoryMonitor.apply(doc);
// an Error means *inconsistent update operation*:
// some batches will be updated, some won't
// no hooks will fire for updated batches
// counts sent to client will be set to zero
// TODO: I perceive this as a problem with updates and hooks impl in general
// we need to run hooks per batch (see https://github.com/lightblue-platform/lightblue-mongo/issues/378)
}
}


private boolean updateDoc(EntityMetadata md,
JsonDoc doc,
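
The new enforceMemoryLimit above is called only for documents that actually make it into resultDocs, so lost docs never count against the budget, and the Error aborts the operation after some batches may already have been committed. Below is a minimal, self-contained sketch of that pattern; ResultSizeMonitor and ResultSizeLimitError are hypothetical names for illustration, not the lightblue API.

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for lightblue's memory monitor: it accumulates an
// estimated size for every document that is kept and fails fast once a byte
// budget is exceeded. Throwing an Error (not an Exception) mirrors the diff,
// where ERROR_RESULT_SIZE_TOO_LARGE is re-thrown rather than turned into a
// per-document error.
class ResultSizeLimitError extends Error {
    ResultSizeLimitError(long used, long max) {
        super("result set too large: " + used + "B > " + max + "B");
    }
}

class ResultSizeMonitor {
    private final long maxBytes;
    private long usedBytes;

    ResultSizeMonitor(long maxBytes) { this.maxBytes = maxBytes; }

    void apply(String doc) {
        usedBytes += doc.getBytes(StandardCharsets.UTF_8).length; // crude size estimate
        if (usedBytes > maxBytes) {
            throw new ResultSizeLimitError(usedBytes, maxBytes);
        }
    }
}

public class EnforceLimitSketch {
    public static void main(String[] args) {
        ResultSizeMonitor monitor = new ResultSizeMonitor(16); // tiny budget so the demo trips the limit
        List<String> resultDocs = new ArrayList<>();
        String[] updated = {"{\"_id\":1}", "{\"_id\":2}", "{\"_id\":3}"};
        try {
            for (String doc : updated) {
                monitor.apply(doc);  // like enforceMemoryLimit(d) right before resultDocs.add(d)
                resultDocs.add(doc);
            }
        } catch (ResultSizeLimitError e) {
            // Documents accepted before the limit stay in resultDocs; the caller
            // only sees the error.
            System.out.println(e.getMessage() + "; kept " + resultDocs.size() + " docs");
        }
    }
}
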
@@ -354,6 +354,7 @@ public CRUDUpdateResponse update(CRUDOperationContext ctx,
docUpdater.setResultSizeThresholds(ctx.getFactory().getMaxResultSetSizeForWritesB(), ctx.getFactory().getWarnResultSetSizeB(), query);
ctx.setProperty(PROP_UPDATER, docUpdater);
docUpdater.update(ctx, coll, md, response, mongoQuery);
LOGGER.debug("ctx.inputDocuments size after update, before hooks: "+docUpdater.getDataSizeB()+"B");
ctx.getHookManager().setQueuedHooksSizeThresholds(ctx.getFactory().getMaxResultSetSizeForWritesB(), ctx.getFactory().getWarnResultSetSizeB(), query, docUpdater.getDataSizeB());
ctx.getHookManager().queueHooks(ctx);
} else {
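
The hunk above feeds the bytes already consumed by the updater (docUpdater.getDataSizeB()) into the hook manager's thresholds, so queued hooks and update results effectively draw from one write-size budget. How the real HookManager combines these numbers is internal to lightblue; the sketch below only illustrates the shared-budget idea, and WriteSizeBudget is a hypothetical name.

// Hypothetical shared-budget accounting: the queued hooks only get whatever
// headroom the update phase left under maxResultSetSizeForWritesB.
public class HookBudgetSketch {

    static class WriteSizeBudget {
        private final long maxBytes;
        private final long warnBytes;

        WriteSizeBudget(long maxBytes, long warnBytes) {
            this.maxBytes = maxBytes;
            this.warnBytes = warnBytes;
        }

        // alreadyUsedBytes plays the role of docUpdater.getDataSizeB()
        long remainingForHooks(long alreadyUsedBytes) {
            return Math.max(0L, maxBytes - alreadyUsedBytes);
        }

        boolean shouldWarn(long alreadyUsedBytes, long queuedHookBytes) {
            return alreadyUsedBytes + queuedHookBytes > warnBytes;
        }

        boolean exceeds(long alreadyUsedBytes, long queuedHookBytes) {
            return alreadyUsedBytes + queuedHookBytes > maxBytes;
        }
    }

    public static void main(String[] args) {
        // 25798 B is the measured ctx.inputDocuments size used in the test below;
        // the max and warn levels here are arbitrary demo values.
        WriteSizeBudget budget = new WriteSizeBudget(25798 + 10, 20000);
        long usedByUpdate = 25798;
        System.out.println("headroom left for hooks: " + budget.remainingForHooks(usedByUpdate) + "B");
        System.out.println("warn with 0B of hooks queued? " + budget.shouldWarn(usedByUpdate, 0));
        System.out.println("limit exceeded with 100B of hooks queued? " + budget.exceeds(usedByUpdate, 100));
    }
}
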
@@ -50,7 +50,6 @@
import com.redhat.lightblue.mongo.common.MongoDataStore;
import com.redhat.lightblue.mongo.config.MongoConfiguration;
import com.redhat.lightblue.util.JsonDoc;
import com.redhat.lightblue.util.JsonUtils;

/**
* Memory limit tests.
@@ -129,14 +128,12 @@ private void setupTestDataAndMetadata() throws IOException {
emd = getMd("./testMetadata_cap.json");
controller.afterUpdateEntityInfo(null, emd.getEntityInfo(), false);
testDoc = new JsonDoc(loadJsonNode("./testData_cap.json"));
testDocSize = JsonUtils.size(testDoc.getRoot())+27; // 27 is for mongo generated _id
}

private void setupTestDataAndMetadataWithHook() throws IOException {
emd = getMd("./testMetadata_cap_hook.json");
controller.afterUpdateEntityInfo(null, emd.getEntityInfo(), false);
testDoc = new JsonDoc(loadJsonNode("./testData_cap.json"));
testDocSize = JsonUtils.size(testDoc.getRoot())+27; // 27 is for mongo generated _id
}

private void addDocument(CRUDOperationContext ctx,JsonDoc doc) {
@@ -162,8 +159,6 @@ private List<DocCtx> streamToList(CRUDOperationContext ctx) {

EntityMetadata emd;
JsonDoc testDoc;
int testDocSize;
final int updaterDocCopies = 3;

private void insertDocs(int count) throws IOException {
TestCRUDOperationContext ctx = new TestCRUDOperationContext("test", CRUDOperation.INSERT);
@@ -223,7 +218,8 @@ public void updateDocuments_LimitNotExceeded() throws Exception {
TestCRUDOperationContext ctx = new TestCRUDOperationContext("test", CRUDOperation.UPDATE);
ctx.add(emd);

ctx.getFactory().setMaxResultSetSizeForWritesB(11*updaterDocCopies*testDocSize);
// 74 docs is 25798 B, so 11 docs is ~3835 B
ctx.getFactory().setMaxResultSetSizeForWritesB(3835);
CRUDUpdateResponse response = controller.update(ctx,
query("{'field':'field1','op':'$eq','rvalue':'f1'}"),
update("{ '$set': { 'field2' : 'f2-updated' } }"),
@@ -252,35 +248,37 @@ public void updateDocuments_LimitNotExceeded() throws Exception {
@Test
public void updateDocuments_LimitExceeded() throws Exception {

int count = MongoCRUDController.DEFAULT_BATCH_SIZE+10;
// Use 2 full batches + 10 docs: with just one batch + 10, all docs would end up updated, because the memory limit is only enforced after a batch is committed.
int count = 2*MongoCRUDController.DEFAULT_BATCH_SIZE+10;

setupTestDataAndMetadata();
insertDocs(count);

TestCRUDOperationContext ctx = new TestCRUDOperationContext("test", CRUDOperation.UPDATE);
ctx.add(emd);

ctx.getFactory().setMaxResultSetSizeForWritesB((MongoCRUDController.DEFAULT_BATCH_SIZE+1)*updaterDocCopies*testDocSize);
// 74 docs is 25798 B; 66 docs is ~22310 B
ctx.getFactory().setMaxResultSetSizeForWritesB(22310);
CRUDUpdateResponse response = controller.update(ctx,
query("{'field':'field1','op':'$eq','rvalue':'f1'}"),
update("{ '$set': { 'field2' : 'f2-updated' } }"),
projection("{'field':'*'}"));

// this is wrong - one batch was updated successfully
// see IterateAndUpdate.java:219 for more info
Assert.assertEquals(1, ctx.getErrors().size());
Assert.assertEquals(MongoCrudConstants.ERROR_RESULT_SIZE_TOO_LARGE, ctx.getErrors().get(0).getErrorCode());

// this is misleading - 2 batches were updated successfully
// see IterateAndUpdate#enforceMemoryLimit for more info
Assert.assertEquals(0, response.getNumMatched());
Assert.assertEquals(0, response.getNumUpdated());
Assert.assertEquals(0, response.getNumFailed());

Assert.assertEquals(1, ctx.getErrors().size());
Assert.assertEquals(MongoCrudConstants.ERROR_RESULT_SIZE_TOO_LARGE, ctx.getErrors().get(0).getErrorCode());

DBCursor cursor = db.getCollection("data").find();

int i = 0;
while (cursor.hasNext()) {
String field2value = cursor.next().get("field2").toString();
if (i < MongoCRUDController.DEFAULT_BATCH_SIZE) {
if (i < 2*MongoCRUDController.DEFAULT_BATCH_SIZE) {
assertEquals("Expecting first batch to be updated successfully", "f2-updated", field2value);
} else {
assertEquals("Expecting 2nd batch not to be updated, because this is where memory limit was reached", "f2", field2value);
@@ -308,22 +306,23 @@ public void updateDocuments_LimitExceededByHookManager() throws Exception {
TestCRUDOperationContext ctx = new TestCRUDOperationContext("test", CRUDOperation.UPDATE);
ctx.add(emd);

ctx.getFactory().setMaxResultSetSizeForWritesB((count+1)*updaterDocCopies*testDocSize); // setting threshold above the size of docs to update
// 25798 B is the ctx.inputDocuments size after update, but before hooks
ctx.getFactory().setMaxResultSetSizeForWritesB(25798+10);

CRUDUpdateResponse response = controller.update(ctx,
query("{'field':'field1','op':'$eq','rvalue':'f1'}"),
update("{ '$set': { 'field2' : 'f2-updated' } }"),
projection("{'field':'*'}"));

// expecting memory limit to kick in during hook processing
Assert.assertEquals(1, ctx.getErrors().size());
Assert.assertEquals(Response.ERR_RESULT_SIZE_TOO_LARGE, ctx.getErrors().get(0).getErrorCode());

// all updates applied successfully
Assert.assertEquals(count, response.getNumMatched());
Assert.assertEquals(count, response.getNumUpdated());
Assert.assertEquals(0, response.getNumFailed());

// expecting memory limit to kick in during hook processing
Assert.assertEquals(1, ctx.getErrors().size());
Assert.assertEquals(Response.ERR_RESULT_SIZE_TOO_LARGE, ctx.getErrors().get(0).getErrorCode());

DBCursor cursor = db.getCollection("data").find();

int i = 0;
