Skip to content
This repository has been archived by the owner on Feb 1, 2022. It is now read-only.

Commit

Permalink
PHP-1464: GridFS should not drop dupes when creating index
Browse files Browse the repository at this point in the history
Additionally, we'll check for an exception after creating the index so that we can return early (instead of attempting to insert the first chunk and inevitably moving to cleanup).
  • Loading branch information
jmikola committed Jul 1, 2015
1 parent c0fa5e3 commit 49d6485
Show file tree
Hide file tree
Showing 2 changed files with 160 additions and 2 deletions.
21 changes: 19 additions & 2 deletions gridfs/gridfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ void php_mongo_ensure_gridfs_index(zval *return_value, zval *this_ptr TSRMLS_DC)
{
zval *index, *options;

/* ensure index on chunks.n */
/* ensure unique index on chunks collection's "files_id" and "n" fields */
MAKE_STD_ZVAL(index);
array_init(index);
add_assoc_long(index, "files_id", 1);
Expand All @@ -161,7 +161,6 @@ void php_mongo_ensure_gridfs_index(zval *return_value, zval *this_ptr TSRMLS_DC)
MAKE_STD_ZVAL(options);
array_init(options);
add_assoc_bool(options, "unique", 1);
add_assoc_bool(options, "dropDups", 1);

MONGO_METHOD2(MongoCollection, ensureIndex, return_value, getThis(), index, options);

Expand Down Expand Up @@ -427,6 +426,12 @@ PHP_METHOD(MongoGridFS, storeBytes)
php_mongo_ensure_gridfs_index(&temp, chunks TSRMLS_CC);
zval_dtor(&temp);

/* Abort if we could not create the unique index on fs.chunks */
if (EG(exception)) {
gridfs_rewrite_cursor_exception(TSRMLS_C);
RETURN_FALSE;
}

if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|aa/", &bytes, &bytes_len, &extra, &options) == FAILURE) {
return;
}
Expand Down Expand Up @@ -654,6 +659,12 @@ PHP_METHOD(MongoGridFS, storeFile)
php_mongo_ensure_gridfs_index(&temp, chunks TSRMLS_CC);
zval_dtor(&temp);

/* Abort if we could not create the unique index on fs.chunks */
if (EG(exception)) {
gridfs_rewrite_cursor_exception(TSRMLS_C);
RETURN_FALSE;
}

if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z|aa/", &fh, &extra, &options) == FAILURE) {
return;
}
Expand Down Expand Up @@ -944,6 +955,12 @@ PHP_METHOD(MongoGridFS, remove)
php_mongo_ensure_gridfs_index(&chunktemp, chunks TSRMLS_CC);
zval_dtor(&chunktemp);

/* Abort if we could not create the unique index on fs.chunks */
if (EG(exception)) {
gridfs_rewrite_cursor_exception(TSRMLS_C);
RETURN_FALSE;
}

if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|za/", &criteria, &options) == FAILURE) {
return;
}
Expand Down
141 changes: 141 additions & 0 deletions tests/generic/bug01464.phpt
Original file line number Diff line number Diff line change
@@ -0,0 +1,141 @@
--TEST--
Test for PHP-1464: GridFS should not drop dupes when creating index
--SKIPIF--
<?php require_once "tests/utils/standalone.inc" ?>
--FILE--
<?php
require_once "tests/utils/server.inc";

// Connect to the standalone test server used by the driver's test suite.
$host = MongoShellServer::getStandaloneInfo();

$mc = new MongoClient($host);
$db = $mc->selectDB(dbname());

// NOTE(review): $filesId and $data appear unused below — the inserts build
// their own MongoId/MongoBinData values inline. Consider removing or reusing.
$filesId = new MongoId('000000000000000000000002');
$data = new MongoBinData('foo', MongoBinData::BYTE_ARRAY);

// Drop files collection
$filesCollection = $db->selectCollection('fs.files');
$filesCollection->drop();

// Drop chunks collection and insert duplicate, orphan chunks.
// Both chunks share files_id ...0001 and n=0, so creating the unique
// {files_id: 1, n: 1} index must fail with an E11000 duplicate key error
// and — the point of PHP-1464 — must NOT silently discard one of the
// duplicates (which the removed dropDups option would have done).
$chunksCollection = $db->selectCollection('fs.chunks');
$chunksCollection->drop();
$chunksCollection->insert(array(
'_id' => new MongoId('000000000000000000000002'),
'files_id' => new MongoId('000000000000000000000001'),
'n' => 0,
'data' => new MongoBinData('foo', MongoBinData::BYTE_ARRAY),
));
$chunksCollection->insert(array(
'_id' => new MongoId('000000000000000000000003'),
'files_id' => new MongoId('000000000000000000000001'),
'n' => 0,
'data' => new MongoBinData('bar', MongoBinData::BYTE_ARRAY),
));

// Test three methods that start by ensuring the unique index
echo "MongoGridFS::storeBytes():\n";

try {
$db->getGridFS()->storeBytes('foo');
} catch (MongoGridFSException $e) {
echo $e->getMessage(), "\n";
}

echo "\nMongoGridFS::storeFile():\n";

try {
$db->getGridFS()->storeFile(__FILE__);
} catch (MongoGridFSException $e) {
echo $e->getMessage(), "\n";
}

echo "\nMongoGridFS::remove():\n";

try {
$db->getGridFS()->remove();
} catch (MongoGridFSException $e) {
echo $e->getMessage(), "\n";
}

// fs.files should be empty (each method aborted before inserting metadata);
// fs.chunks should still contain BOTH duplicate chunks, proving no data
// was dropped while attempting to create the index.
echo "\nDumping fs.files:\n";

foreach ($filesCollection->find() as $file) {
var_dump($file);
}

echo "\nDumping fs.chunks:\n";

foreach ($chunksCollection->find() as $chunk) {
var_dump($chunk);
}

?>
==DONE==
--CLEAN--
<?php
require_once "tests/utils/server.inc";

// Ensure our duplicate chunks are removed
// Dropping the whole GridFS (fs.files and fs.chunks) leaves the test
// database clean for any subsequent tests in the suite.
$host = MongoShellServer::getStandaloneInfo();
$mc = new MongoClient($host);
$mc->selectDB(dbname())->getGridFS()->drop();

?>
--EXPECTF--
MongoGridFS::storeBytes():
Could not store file: %s:%d: %SE11000 duplicate key error index: %s.fs.chunks.$files_id_1_n_1%sdup key: { : ObjectId('000000000000000000000001'), : 0 }

MongoGridFS::storeFile():
Could not store file: %s:%d: %SE11000 duplicate key error index: %s.fs.chunks.$files_id_1_n_1%sdup key: { : ObjectId('000000000000000000000001'), : 0 }

MongoGridFS::remove():
Could not store file: %s:%d: %SE11000 duplicate key error index: %s.fs.chunks.$files_id_1_n_1%sdup key: { : ObjectId('000000000000000000000001'), : 0 }

Dumping fs.files:

Dumping fs.chunks:
array(4) {
["_id"]=>
object(MongoId)#%d (1) {
["$id"]=>
string(24) "000000000000000000000002"
}
["files_id"]=>
object(MongoId)#%d (1) {
["$id"]=>
string(24) "000000000000000000000001"
}
["n"]=>
int(0)
["data"]=>
object(MongoBinData)#%d (2) {
["bin"]=>
string(3) "foo"
["type"]=>
int(2)
}
}
array(4) {
["_id"]=>
object(MongoId)#%d (1) {
["$id"]=>
string(24) "000000000000000000000003"
}
["files_id"]=>
object(MongoId)#%d (1) {
["$id"]=>
string(24) "000000000000000000000001"
}
["n"]=>
int(0)
["data"]=>
object(MongoBinData)#%d (2) {
["bin"]=>
string(3) "bar"
["type"]=>
int(2)
}
}
==DONE==

0 comments on commit 49d6485

Please sign in to comment.