-
-
Notifications
You must be signed in to change notification settings - Fork 2
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
hexadecibal
committed
Jan 5, 2022
1 parent
218076d
commit 917a1fd
Showing
6 changed files
with
373 additions
and
4 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -102,3 +102,6 @@ dist | |
|
||
# TernJS port file | ||
.tern-port | ||
|
||
package-lock.json | ||
tests/drive* |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,2 +1,47 @@ | ||
# nebula-migrate | ||
Migration tool for upgrading Nebula Drives between updates with breaking changes | ||
Migration tool for upgrading Nebula drives between updates with breaking changes | ||
|
||
## Usage | ||
```js | ||
await Migrate({ rootdir: __dirname, drivePath: '/drive', encryptionKey, keyPair }) | ||
|
||
|
||
const drive = new Drive(path.join(__dirname, '/drive'), null, { | ||
keyPair, | ||
encryptionKey, | ||
swarmOpts: { | ||
server: true, | ||
client: true | ||
} | ||
}) | ||
|
||
await drive.ready() // Upgraded drive is now ready for use! | ||
|
||
|
||
/** | ||
* Existing drive is renamed to <drivePath>_old | ||
* New drive replaces the original at <drivePath> | ||
* | ||
* Old drive persists in the event migration fails. | ||
* Feel free to decide if this old directory can be removed or not. | ||
* | ||
* Before: | ||
* |__ root/ | ||
* |__ drive/ | ||
* | ||
* After: | ||
* |__root/ | ||
* |__ drive/ | ||
* |__ drive_old/ | ||
* | ||
 */ | ||
``` | ||
#### `await Migrate({ rootdir, drivePath[,encryptionKey][,keyPair] })` | ||
Migrates an older version of Nebula to the newer version. | ||
- `rootdir`: root directory that the drive resides in | ||
- `drivePath`: the relative path of the drive `/drive` | ||
- `encryptionKey`: Encryption key for migrating encrypted drives | ||
- `keyPair`: The original drive's keyPair |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,202 @@ | ||
const fs = require('fs') | ||
const path = require('path') | ||
const del = require('del') | ||
const Hypercore = require('hypercore') | ||
const HypercoreNew = require('@telios/nebula-drive-new/node_modules/hypercore') | ||
const Drive = require('@telios/nebula-drive') | ||
const DriveNew = require('@telios/nebula-drive-new') | ||
|
||
module.exports = async ({ rootdir, drivePath, keyPair, encryptionKey }) => { | ||
// 1. Output all transactions (encrypted) from Autobee into a migration folder. If migration folder exists, run migration | ||
try { | ||
fs.mkdirSync(path.join(rootdir, drivePath, 'migrate')) | ||
|
||
// Start old drive | ||
const drive = new Drive(path.join(rootdir, drivePath), null, { | ||
keyPair, | ||
encryptionKey, | ||
joinSwarm: false, | ||
swarmOpts: { | ||
server: true, | ||
client: true | ||
} | ||
}) | ||
|
||
await drive.ready() | ||
|
||
// Make file for migration script | ||
await createMigrationScript(drive, rootdir, drivePath) | ||
// 2. Create a new drive with the latest version | ||
const newDrive = new DriveNew(path.join(rootdir, '/drive_new'), null, { | ||
keyPair, | ||
encryptionKey, | ||
joinSwarm: false, | ||
swarmOpts: { | ||
server: true, | ||
client: true | ||
} | ||
}) | ||
|
||
// Initialize and close new drive only to populate necessary files and directories | ||
await newDrive.ready() | ||
await newDrive.close() | ||
|
||
// Close old drive before extracting and populating Hypercores | ||
await drive.close() | ||
|
||
// Remove new cores so they can be replace. The overwrite option in Hypercore does not seem to work as expected which is why these need to be deleted. | ||
await copyCores(rootdir, drivePath, encryptionKey) | ||
|
||
// 4. Run transasction scripts to fill new Hypercores | ||
await newDrive.ready() | ||
await populateCores(newDrive, rootdir, drivePath) | ||
await newDrive.close() | ||
|
||
// 5. Rename directories and files | ||
const files = fs.readdirSync(path.join(rootdir, drivePath, '/Files')) | ||
|
||
for(file of files) { | ||
fs.renameSync(path.join(rootdir, drivePath, '/Files', file), path.join(rootdir, 'drive_new', '/Files', file)) | ||
} | ||
|
||
fs.renameSync(path.join(rootdir, drivePath), path.join(rootdir, drivePath + '_old')) | ||
fs.renameSync(path.join(rootdir, 'drive_new'), path.join(rootdir, drivePath)) | ||
} catch(err) { | ||
console.log(err) | ||
} | ||
} | ||
|
||
/**
 * Dumps the old drive's three Hyperbee databases (main, meta, local) into
 * <rootdir><drivePath>/migrate/data.json so they can be replayed into the
 * new drive by populateCores.
 *
 * Main-bee entries carrying a `__sub` marker are grouped per collection;
 * entries without it are recorded as plain transactions.
 *
 * NOTE: the original implementation returned before the streams finished and
 * wrote data.json once per stream's 'end' event, racing last-writer-wins.
 * This version awaits all three streams and writes the file exactly once.
 *
 * @param {Object} drive - The opened old-format drive.
 * @param {string} rootdir - Root directory the drive resides in.
 * @param {string} drivePath - Relative path of the drive.
 * @throws On stream errors or unparsable entries.
 */
async function createMigrationScript(drive, rootdir, drivePath) {
  const bees = {
    main: {
      collections: {},
      tx: []
    },
    meta: [],
    local: []
  }

  // Consume a Hyperbee read stream, applying onItem to each decoded value.
  const collect = (stream, onItem) =>
    new Promise((resolve, reject) => {
      stream.on('data', data => {
        try {
          onItem(JSON.parse(data.value.toString()))
        } catch (err) {
          reject(err)
        }
      })
      stream.on('end', resolve)
      stream.on('error', reject)
    })

  await Promise.all([
    collect(drive.database.bee.createReadStream(), item => {
      const sub = item.value.__sub

      if (!sub) {
        bees.main.tx.push(item)
        return
      }

      if (!bees.main.collections[sub]) {
        bees.main.collections[sub] = []
      }

      bees.main.collections[sub].push(item)
    }),
    collect(drive.database.metadb.createReadStream(), item => bees.meta.push(item)),
    collect(drive._localHB.createReadStream(), item => bees.local.push(item))
  ])

  // Single atomic write once every stream has been fully drained.
  fs.writeFileSync(path.join(rootdir, drivePath, '/migrate/data.json'), JSON.stringify(bees))
}
|
||
/**
 * Recreates the new drive's Hypercores using the keyPairs extracted from the
 * old drive's cores, so the migrated drive keeps the same identities.
 *
 * The freshly scaffolded cores under drive_new/Database are deleted first —
 * Hypercore's overwrite option does not behave as expected, so the cores must
 * be removed and rebuilt.
 *
 * @param {string} rootdir - Root directory the drives reside in.
 * @param {string} drivePath - Relative path of the old drive.
 * @param {Buffer|string} [encryptionKey] - Key for encrypted cores.
 * @throws On filesystem or Hypercore failures.
 */
async function copyCores(rootdir, drivePath, encryptionKey) {
  // Remove the new drive's scaffolded cores so they can be replaced.
  const newCores = fs.readdirSync(path.join(rootdir, 'drive_new', '/Database'))

  for (const core of newCores) {
    await del([path.join(rootdir, 'drive_new', '/Database/' + core)])
  }

  // Rebuild Hypercores with the existing keyPairs.
  const cores = fs.readdirSync(path.join(rootdir, drivePath, '/Database'))

  for (const core of cores) {
    // Open the old core just long enough to read its signing keyPair.
    const oldFeed = new Hypercore(path.join(rootdir, drivePath, '/Database/' + core), { encryptionKey })

    await oldFeed.ready()

    // NOTE(review): assumes `core.header.signer` exposes the core's keyPair
    // in this Hypercore version — confirm against the pinned dependency.
    const keyPair = oldFeed.core.header.signer

    await oldFeed.close()

    // Recreate the core at the new location with the same keyPair.
    const newFeed = new HypercoreNew(path.join(rootdir, 'drive_new', '/Database/' + core), { keyPair, encryptionKey })

    await newFeed.ready()
    await newFeed.close()
  }
}
|
||
/**
 * Replays the transactions dumped by createMigrationScript (migrate/data.json)
 * into the new drive's databases: per-collection entries into their named
 * collections, bare main-bee transactions, metadata entries, and local-bee
 * entries.
 *
 * @param {Object} drive - The opened new-format drive.
 * @param {string} rootdir - Root directory the drive resides in.
 * @param {string} drivePath - Relative path of the old drive (dump location).
 * @throws On a missing/unparsable dump file or database write failures.
 */
async function populateCores(drive, rootdir, drivePath) {
  const raw = fs.readFileSync(path.join(rootdir, drivePath, '/migrate/data.json'))
  const data = JSON.parse(raw)

  const newBee = drive.database.bee
  const newMetadb = drive.database.metadb
  const newLocalB = drive._localHB

  // Replay collection-scoped entries into their named collections.
  for (const sub in data.main.collections) {
    const items = data.main.collections[sub]
    const collection = await drive.db.collection(sub)

    for (const item of items) {
      // The __sub marker was only needed to route entries; drop it.
      delete item.value.__sub
      await collection.put(item.key, item.value)
    }
  }

  // Replay bare main-bee transactions.
  for (const tx of data.main.tx) {
    await newBee.put(tx.key, tx.value)
  }

  // Replay metadata entries.
  for (const tx of data.meta) {
    await newMetadb.put(tx.key, tx.value)
  }

  // Replay local-bee entries.
  for (const tx of data.local) {
    await newLocalB.put(tx.key, tx.value)
  }
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,32 @@ | ||
const Drive = require('@telios/nebula-drive') | ||
const fs = require('fs') | ||
|
||
/**
 * Test fixture: creates a legacy-format drive at `path`, seeds it with a
 * couple of collection records and one encrypted file, then closes it so
 * migration tests have a realistic starting point.
 *
 * @param {Object} opts
 * @param {string} opts.path - Directory to create the drive in.
 * @param {Object} [opts.keyPair] - KeyPair for the drive.
 * @param {Buffer|string} [opts.encryptionKey] - Encryption key for the drive.
 * @throws Rethrows any setup failure so the test run fails loudly instead of
 *         continuing against a half-built fixture.
 */
module.exports.bootstrap = async ({ path, keyPair, encryptionKey }) => {
  try {
    const drive = new Drive(path, null, {
      keyPair,
      encryptionKey,
      joinSwarm: false,
      swarmOpts: {
        server: true,
        client: true
      }
    })

    await drive.ready()

    // Seed a collection with a couple of records.
    const collection = await drive.db.collection('foo')

    await collection.put('hello', { bar: "world" })
    await collection.put('alice', { name: "fitzgerald" })

    // Seed one encrypted file (this script's own source, read from cwd).
    const stream = fs.createReadStream('./index.js')

    await drive.writeFile('/index.js', stream, { encrypted: true })

    await drive.close()
  } catch (err) {
    // Log for context, then rethrow — a silently failed fixture would make
    // downstream test failures misleading.
    console.log(err)
    throw err
  }
}
Oops, something went wrong.