Skip to content

Commit

Permalink
Initial commit
Browse files Browse the repository at this point in the history
  • Loading branch information
hexadecibal committed Jan 5, 2022
1 parent 218076d commit 917a1fd
Show file tree
Hide file tree
Showing 6 changed files with 373 additions and 4 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -102,3 +102,6 @@ dist

# TernJS port file
.tern-port

package-lock.json
tests/drive*
47 changes: 46 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
@@ -1,2 +1,47 @@
# nebula-migrate
Migration tool for upgrading Nebula Drives between updates with breaking changes
Migration tool for upgrading Nebula drives between updates with breaking changes

## Usage
```js
await Migrate({ rootdir: __dirname, drivePath: '/drive', encryptionKey, keyPair })


const drive = new Drive(path.join(__dirname, '/drive'), null, {
keyPair,
encryptionKey,
swarmOpts: {
server: true,
client: true
}
})

await drive.ready() // Upgraded drive is now ready for use!


/**
* Existing drive is renamed to <drivePath>_old
* New drive replaces the original at <drivePath>
*
* Old drive persists in the event migration fails.
* Feel free to decide if this old directory can be removed or not.
*
* Before:
* |__ root/
* |__ drive/
*
* After:
* |__root/
* |__ drive/
* |__ drive_old/
*
 */
```
#### `await Migrate({ rootdir, drivePath[, encryptionKey][, keyPair] })`
Migrates an older version of nebula to the newer version.
- `rootdir`: root directory that the drive resides in
- `drivePath`: the relative path of the drive `/drive`
- `encryptionKey`: Encryption key for migrating encrypted drives
- `keyPair`: The original drive's keyPair
202 changes: 202 additions & 0 deletions index.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,202 @@
const fs = require('fs')
const path = require('path')
const del = require('del')
const Hypercore = require('hypercore')
const HypercoreNew = require('@telios/nebula-drive-new/node_modules/hypercore')
const Drive = require('@telios/nebula-drive')
const DriveNew = require('@telios/nebula-drive-new')

module.exports = async ({ rootdir, drivePath, keyPair, encryptionKey }) => {
// 1. Output all transactions (encrypted) from Autobee into a migration folder. If migration folder exists, run migration
try {
fs.mkdirSync(path.join(rootdir, drivePath, 'migrate'))

// Start old drive
const drive = new Drive(path.join(rootdir, drivePath), null, {
keyPair,
encryptionKey,
joinSwarm: false,
swarmOpts: {
server: true,
client: true
}
})

await drive.ready()

// Make file for migration script
await createMigrationScript(drive, rootdir, drivePath)
// 2. Create a new drive with the latest version
const newDrive = new DriveNew(path.join(rootdir, '/drive_new'), null, {
keyPair,
encryptionKey,
joinSwarm: false,
swarmOpts: {
server: true,
client: true
}
})

// Initialize and close new drive only to populate necessary files and directories
await newDrive.ready()
await newDrive.close()

// Close old drive before extracting and populating Hypercores
await drive.close()

// Remove new cores so they can be replace. The overwrite option in Hypercore does not seem to work as expected which is why these need to be deleted.
await copyCores(rootdir, drivePath, encryptionKey)

// 4. Run transasction scripts to fill new Hypercores
await newDrive.ready()
await populateCores(newDrive, rootdir, drivePath)
await newDrive.close()

// 5. Rename directories and files
const files = fs.readdirSync(path.join(rootdir, drivePath, '/Files'))

for(file of files) {
fs.renameSync(path.join(rootdir, drivePath, '/Files', file), path.join(rootdir, 'drive_new', '/Files', file))
}

fs.renameSync(path.join(rootdir, drivePath), path.join(rootdir, drivePath + '_old'))
fs.renameSync(path.join(rootdir, 'drive_new'), path.join(rootdir, drivePath))
} catch(err) {
console.log(err)
}
}

/**
 * Serialize every record from the drive's databases into
 * <rootdir>/<drivePath>/migrate/data.json so the new drive can be rebuilt.
 *
 * Fixes over the original: the function previously attached stream listeners
 * and returned immediately, so callers could proceed before data.json was
 * written; each stream's 'end' handler also rewrote the same file, racing the
 * others; and stream 'error' events were never handled (the surrounding
 * try/catch cannot catch them).
 *
 * @param {object} drive - The old drive (uses database.bee, database.metadb, _localHB).
 * @param {string} rootdir - Root directory the drive resides in.
 * @param {string} drivePath - Relative path of the drive.
 * @throws If any read stream errors or the output file cannot be written.
 */
async function createMigrationScript(drive, rootdir, drivePath) {
  const bees = {
    main: {
      collections: {},
      tx: []
    },
    meta: [],
    local: []
  }

  // Drain a bee read stream, passing each JSON-decoded value to onItem.
  // Resolves on 'end', rejects on 'error'.
  const drain = (stream, onItem) => new Promise((resolve, reject) => {
    stream.on('data', data => onItem(JSON.parse(data.value.toString())))
    stream.on('end', resolve)
    stream.on('error', reject)
  })

  await Promise.all([
    drain(drive.database.bee.createReadStream(), item => {
      // Records tagged with __sub belong to a named sub-collection;
      // untagged records are top-level transactions.
      const sub = item.value.__sub

      if (!sub) {
        bees.main.tx.push(item)
        return
      }

      if (!bees.main.collections[sub]) {
        bees.main.collections[sub] = []
      }

      bees.main.collections[sub].push(item)
    }),
    drain(drive.database.metadb.createReadStream(), item => bees.meta.push(item)),
    drain(drive._localHB.createReadStream(), item => bees.local.push(item))
  ])

  // Write once, only after every stream has fully drained.
  fs.writeFileSync(path.join(rootdir, drivePath, '/migrate/data.json'), JSON.stringify(bees))
}

/**
 * Replace the freshly-scaffolded Hypercores under drive_new/Database with
 * cores rebuilt from the old drive, reusing each old core's signing key pair.
 *
 * The scaffolded cores are deleted first because Hypercore's overwrite option
 * does not seem to work as expected.
 *
 * @param {string} rootdir - Root directory the drives reside in.
 * @param {string} drivePath - Relative path of the old drive.
 * @param {Buffer|string} [encryptionKey] - Encryption key for encrypted cores.
 */
async function copyCores(rootdir, drivePath, encryptionKey) {
  // Remove the placeholder cores created by the new drive's initialization.
  // No existence check needed: readdirSync just listed these entries.
  const newCores = fs.readdirSync(path.join(rootdir, 'drive_new', '/Database'))

  for (const core of newCores) {
    await del([
      path.join(rootdir, 'drive_new', '/Database/' + core)
    ])
  }

  // Rebuild each Hypercore in the new format with the old core's keyPair.
  const cores = fs.readdirSync(path.join(rootdir, drivePath, '/Database'))

  for (const core of cores) {
    // Open the old core just long enough to read its signing key pair.
    const oldFeed = new Hypercore(path.join(rootdir, drivePath, '/Database/' + core), { encryptionKey })
    await oldFeed.ready()
    const keyPair = oldFeed.core.header.signer
    await oldFeed.close()

    // Create the new-format core with the same identity.
    const newFeed = new HypercoreNew(path.join(rootdir, 'drive_new', '/Database/' + core), { keyPair, encryptionKey })
    await newFeed.ready()
    await newFeed.close()
  }
}

/**
 * Replay the records captured in migrate/data.json (see createMigrationScript)
 * into the new drive's databases. Writes are applied sequentially to preserve
 * the original record order.
 *
 * @param {object} drive - The new drive (uses db.collection, database.bee,
 *   database.metadb, _localHB).
 * @param {string} rootdir - Root directory the drive resides in.
 * @param {string} drivePath - Relative path of the old drive (where the
 *   migration folder lives).
 */
async function populateCores(drive, rootdir, drivePath) {
  const data = JSON.parse(fs.readFileSync(path.join(rootdir, drivePath, '/migrate/data.json')))

  // Restore sub-collections through the collection API.
  for (const sub in data.main.collections) {
    const items = data.main.collections[sub]
    const collection = await drive.db.collection(sub)

    for (const item of items) {
      // The __sub routing tag is internal to the old format; drop it.
      delete item.value.__sub
      await collection.put(item.key, item.value)
    }
  }

  // Restore top-level transactions, metadata, and local records.
  for (const tx of data.main.tx) {
    await drive.database.bee.put(tx.key, tx.value)
  }

  for (const tx of data.meta) {
    await drive.database.metadb.put(tx.key, tx.value)
  }

  for (const tx of data.local) {
    await drive._localHB.put(tx.key, tx.value)
  }
}
16 changes: 13 additions & 3 deletions package.json
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
{
"name": "@telios/nebula-migrate",
"version": "1.0.0-alpha.1",
"version": "1.0.0",
"description": "Migration tool for upgrading Nebula Drives between updates with breaking changes",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
"test": "tape tests/*.test.js | tap-spec"
},
"repository": {
"type": "git",
Expand All @@ -15,5 +15,15 @@
"bugs": {
"url": "https://github.com/Telios-org/nebula-migrate/issues"
},
"homepage": "https://github.com/Telios-org/nebula-migrate#readme"
"homepage": "https://github.com/Telios-org/nebula-migrate#readme",
"devDependencies": {
"del": "^6.0.0",
"tap-spec": "^5.0.0",
"tape": "^5.4.0",
"tape-promise": "^4.0.0"
},
"dependencies": {
"@telios/nebula-drive": "^5.3.1",
"@telios/nebula-drive-new": "https://github.com/Telios-org/nebula-drive#hyper_latest"
}
}
32 changes: 32 additions & 0 deletions tests/helper.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
const Drive = require('@telios/nebula-drive')
const fs = require('fs')

/**
 * Create and seed a legacy Nebula drive at `path` for migration tests:
 * two records in a 'foo' collection plus one encrypted file.
 *
 * Fixes over the original: errors were swallowed with console.log, so tests
 * would run against a broken fixture and the drive was never closed on
 * failure. Errors now propagate and the drive is always closed.
 *
 * @param {string} path - Directory to create the drive in.
 * @param {object} [keyPair] - Key pair for the drive.
 * @param {Buffer|string} [encryptionKey] - Encryption key for the drive.
 * @throws If drive creation or seeding fails.
 */
module.exports.bootstrap = async ({ path, keyPair, encryptionKey }) => {
  const drive = new Drive(path, null, {
    keyPair,
    encryptionKey,
    joinSwarm: false,
    swarmOpts: {
      server: true,
      client: true
    }
  })

  await drive.ready()

  try {
    const collection = await drive.db.collection('foo')

    await collection.put('hello', { bar: "world" })
    await collection.put('alice', { name: "fitzgerald" })

    // NOTE(review): resolved relative to process.cwd(), not this file —
    // assumes tests are run from the repo root.
    const stream = fs.createReadStream('./index.js')

    await drive.writeFile('/index.js', stream, { encrypted: true })
  } finally {
    // Always release the drive, even when seeding fails.
    await drive.close()
  }
}
Loading

0 comments on commit 917a1fd

Please sign in to comment.