diff --git a/src/components/TestRun.vue b/src/components/TestRun.vue
index bd617a2f..25aa8fa0 100644
--- a/src/components/TestRun.vue
+++ b/src/components/TestRun.vue
@@ -262,12 +262,11 @@ export default {
if (this.rdf) {
const axes = this.rdf.inputs[0].axes; // something like "zyx"
let maxShape; // something like [16, 64, 64]
- const shape = this.rdf.inputs[0].shape;
- if (shape instanceof Array) {
- maxShape = shape;
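+      // A fixed tile size takes precedence over the shape given in the RDF.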
+ if (this.fixedTileSize !== false) {
+ maxShape = this.fixedTileSize;
} else {
// array of undefined
- maxShape = shape.min.map(() => undefined);
+ maxShape = this.rdf.inputs[0].shape.min.map(() => undefined);
}
return axes.split("").reduce((acc, cur, i) => {
acc[cur] = maxShape[i];
@@ -278,6 +277,27 @@ export default {
}
}
},
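+  // Keep the x and y tile size / overlap values in sync so tiles stay square.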
+ watch: {
+ tileSizes: {
+      handler(newObj, oldObj) {
+ if (newObj.y !== newObj.x) {
+ this.tileSizes.y = newObj.x; // keep x and y the same
+ }
+ console.log(oldObj, newObj);
+ },
+ deep: true
+ },
+
+ tileOverlap: {
+      handler(newObj, oldObj) {
+ if (newObj.y !== newObj.x) {
+ this.tileOverlap.y = newObj.x; // keep x and y the same
+ }
+ console.log(oldObj, newObj);
+ },
+ deep: true
+ }
+ },
methods: {
async turnOn() {
this.switch = true;
@@ -285,6 +305,7 @@ export default {
await this.loadImJoy();
await this.loadTritonClient();
await this.loadRdf();
+ await this.loadTritonConfig();
this.setDefaultTileSize();
this.setDefaultOverlap();
await this.detectInputEndianness();
@@ -308,10 +329,15 @@ export default {
setDefaultTileSize() {
const tileSizes = Object.assign({}, this.inputMinShape);
- if (!this.fixedTileSize) {
+ const axes = this.rdf.inputs[0].axes;
+ if (this.fixedTileSize === false) {
const xyFactor = 4;
tileSizes.x = xyFactor * this.inputMinShape.x;
tileSizes.y = xyFactor * this.inputMinShape.y;
+ } else {
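+        // The model declares a fixed input shape: use it as the tile size, axis by axis.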
+ axes.split("").map((a, i) => {
+ tileSizes[a] = this.fixedTileSize[i];
+ });
}
this.tileSizes = tileSizes;
},
@@ -321,7 +347,7 @@ export default {
const outputSpec = this.rdf.outputs[0];
const axes = inputSpec.axes;
let overlap = {};
- if (outputSpec.halo) {
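+      // Halo-based overlap only applies when the tile size is not fixed by the model.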
+ if (outputSpec.halo && this.fixedTileSize === false) {
axes.split("").map((a, i) => {
if (outputSpec.axes.includes(a) && a !== "z") {
overlap[a] = 2 * outputSpec.halo[i];
@@ -386,12 +412,29 @@ export default {
let outImg = await this.submitTensor(paddedTensor);
await this.api.log("Output tile shape: " + outImg._rshape);
const outTensor = ImjoyToTfJs(outImg);
- const cropedTensor = padder.crop(outTensor, padArr);
- return cropedTensor;
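+      // Only crop away the padding for image-to-image outputs; classification-style
+      // outputs have no spatial axes to crop, so the tensor is returned as-is.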
+ const isImg2Img =
+ this.rdf.outputs[0].axes.includes("x") &&
+ this.rdf.outputs[0].axes.includes("y");
+ let result = outTensor;
+ if (isImg2Img) {
+        const croppedTensor = padder.crop(outTensor, padArr);
+        result = croppedTensor;
+ }
+ return result;
},
async runTiles(tensor, inputSpec, outputSpec) {
- const padder = new ImgPadder(inputSpec, outputSpec, 0);
+ let padder;
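+      // With a fixed tile size the padder pads to that exact shape; otherwise the
+      // padded shape is inferred from the RDF's min/step constraints.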
+ if (this.fixedTileSize === false) {
+ padder = new ImgPadder(
+ undefined,
+ inputSpec.shape.min,
+ inputSpec.shape.step,
+ 0
+ );
+ } else {
+ padder = new ImgPadder(this.fixedTileSize, undefined, undefined, 0);
+ }
const tileSize = inputSpec.axes.split("").map(a => this.tileSizes[a]);
const overlap = inputSpec.axes.split("").map(a => this.tileOverlap[a]);
console.log("tile size:", tileSize, "overlap:", overlap);
@@ -402,6 +445,10 @@ export default {
await this.api.log("Number of tiles: " + inTiles.length);
const outTiles = [];
for (let i = 0; i < inTiles.length; i++) {
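+        // Report per-tile progress in the UI while the model runs.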
+ this.setInfoPanel(
+ `Running the model... (${i + 1}/${inTiles.length})`,
+ true
+ );
const tile = inTiles[i];
console.log(tile);
tile.slice(tensor);
@@ -514,6 +561,13 @@ export default {
this.triton = await server.get_service("triton-client");
},
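+    // Fetch the model's Triton config from ai.imjoy.io using its nickname.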
+ async loadTritonConfig() {
+ const nickname = this.resourceItem.nickname;
+ const url = `https://ai.imjoy.io/triton/v2/models/${nickname}/config`;
+ const config = await fetch(url).then(res => res.json());
+ this.tritonConfig = config;
+ },
+
async loadImJoy() {
function waitForImjoy(timeout = 10000) {
return new Promise((resolve, reject) => {
diff --git a/src/imgProcess.js b/src/imgProcess.js
index 79858d1c..619c9c9c 100644
--- a/src/imgProcess.js
+++ b/src/imgProcess.js
@@ -368,23 +368,23 @@ export async function getNpyEndianness(url) {
}
export class ImgPadder {
- constructor(inputSpec, outputSpec, padValue = 0) {
- this.inputSpec = inputSpec;
- this.outputSpec = outputSpec;
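+  // fixedPaddedShape: explicit target tile shape; padMin/padStep: constraints used
+  // to infer the padded shape when no fixed shape is given.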
+ constructor(fixedPaddedShape, padMin, padStep, padValue = 0) {
+ this.fixedPaddedShape = fixedPaddedShape;
+ this.padMin = padMin;
+ this.padStep = padStep;
this.padValue = padValue;
}
getPaddedShape(shape) {
- const specShape = this.inputSpec.shape;
let paddedShape = [];
- if (specShape instanceof Array) {
+ if (this.fixedPaddedShape) {
// Explicit shape
- paddedShape = specShape;
+ paddedShape = this.fixedPaddedShape;
} else {
// Implicit shape
// infer from the min and step
- const min = specShape.min;
- const step = specShape.step;
+ const min = this.padMin;
+ const step = this.padStep;
for (let d = 0; d < shape.length; d++) {
if (step[d] === 0) {
paddedShape.push(shape[d]);
@@ -427,27 +427,18 @@ export class ImgPadder {
crop(tensor, pad, halo = undefined) {
let res;
- const isImg2Img =
- this.outputSpec.axes.includes("x") && this.outputSpec.axes.includes("y");
- if (isImg2Img) {
- // img-to-img model
- if (halo) {
- res = tf.slice(
- tensor,
- pad.map((p, i) => p[0] + halo[i]),
- tensor.shape.map((s, i) => s - pad[i][0] - pad[i][1] - halo[i] * 2)
- );
- } else {
- res = tf.slice(
- tensor,
- pad.map(p => p[0]),
- tensor.shape.map((s, i) => s - pad[i][0] - pad[i][1])
- );
- }
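+    // Cropping is now unconditional here; the img2img check moved to the caller.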
+ if (halo) {
+ res = tf.slice(
+ tensor,
+ pad.map((p, i) => p[0] + halo[i]),
+ tensor.shape.map((s, i) => s - pad[i][0] - pad[i][1] - halo[i] * 2)
+ );
} else {
- // other model, e.g. classification
- // no crop
- res = tensor;
+ res = tf.slice(
+ tensor,
+ pad.map(p => p[0]),
+ tensor.shape.map((s, i) => s - pad[i][0] - pad[i][1])
+ );
}
res._rdtype = tensor._rdtype;
return res;