Fix tensor cleanup and orphan event listener
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
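Both fixes follow standard patterns: tfjs memory is released explicitly with tf.dispose(), and the FileReader 'load' handler is registered with the `once` option so it unregisters itself after firing. A minimal sketch of the two patterns (variable names hypothetical):

    // once:true removes the handler after its first call, so no orphan
    // listener (or its captured closure) outlives the load it served.
    reader.addEventListener('load', () => resolve(reader.result), { once: true })

    // tf.dispose() frees the memory backing a tensor, or any array/object
    // containing tensors, and is a safe no-op on null or undefined.
    tf.dispose(someTensor)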
@@ -25,12 +25,14 @@ export default {
     },
     closeCamera () {
       this.cameraStream.getTracks().forEach( t => t.stop())
+      this.cameraStream = null
+      this.videoAvailable = false
     },
     captureVidFrame() {
       const vidViewer = this.$refs.vid_viewer
       vidViewer.pause()
       let tempCVS = document.createElement('canvas')
       tempCVS.id = 'temp-video-canvas'
       tempCVS.height = vidViewer.videoHeight || parseInt(vidViewer.style.height)
       tempCVS.width = vidViewer.videoWidth || parseInt(vidViewer.style.width)
       const tempCtx = tempCVS.getContext('2d')
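For context, this is the usual MediaStream teardown: stop() on each track releases the device, and the two added lines drop the stale reference and reset the UI flag. A hedged sketch of a slightly more defensive variant (the guard is not in this diff):

    closeCamera () {
      if (!this.cameraStream) return  // nulling the stream makes this re-entry guard possible
      this.cameraStream.getTracks().forEach(t => t.stop())  // release the camera hardware
      this.cameraStream = null      // allow the stream to be garbage-collected
      this.videoAvailable = false   // hide the <video> overlay in the template
    }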
@@ -10,7 +10,7 @@
   </f7-navbar>
   <f7-block class="detect-grid">
     <div class="image-container" ref="image_container">
-      <SvgIcon v-if="!imageView && !videoAvailable" :icon="f7route.params.region" fill-color="var(--avn-theme-color)" @click="selectImage" />
+      <SvgIcon v-if="!imageView.src && !videoAvailable" :icon="f7route.params.region" fill-color="var(--avn-theme-color)" @click="selectImage" />
       <div class="vid-container" :style="`display: ${videoAvailable ? 'block' : 'none'}; position: absolute; width: 100%; height: 100%;`">
         <video id="vid-view" ref="vid_viewer" :srcObject="cameraStream" :autoPlay="true" style="width: 100%; height: 100%"></video>
         <f7-button @click="captureVidFrame()" style="position: absolute; bottom: 32px; left: 50%; transform: translateX(-50%); z-index: 3;" fill large>Capture</f7-button>
@@ -120,7 +120,6 @@
 import submitMixin from './submit-mixin'
 import detectionMixin from './detection-mixin'
 import cameraMixin from './camera-mixin'
-import { Conv2DBackpropFilter } from '@tensorflow/tfjs'
 
 export default {
   mixins: [submitMixin, detectionMixin, cameraMixin],
@@ -139,7 +138,7 @@ import { Conv2DBackpropFilter } from '@tensorflow/tfjs'
       activeRegion: 4,
       classesList: [],
       imageLoaded: false,
-      imageView: null,
+      imageView: new Image(),
       imageLoadMode: "environment",
       detecting: false,
       detectPanel: false,
@@ -300,7 +299,7 @@ import { Conv2DBackpropFilter } from '@tensorflow/tfjs'
       this.videoAvailable = await this.openCamera(this.$refs.image_container)
       if (this.videoAvailable) {
         this.imageLoaded = false
-        this.imageView = null
+        this.imageView.src = null
         this.$refs.image_cvs.style['background-image'] = 'none'
         this.resultData = {}
         var trackDetails = this.cameraStream.getVideoTracks()[0].getSettings()
@@ -384,11 +383,11 @@ import { Conv2DBackpropFilter } from '@tensorflow/tfjs'
           this.detecting = true
           resolve('data:image/jpg;base64,' + searchImage)
         }
-        var reader = new FileReader()
+        const reader = new FileReader()
         reader.addEventListener("load", () => {
           this.detecting = true
           resolve(reader.result)
-        })
+        },{once: true})
         if (this.imageLoadMode == 'sample') {
           fetch(`${this.isCordova ? 'https://localhost' : '.'}/samples/${this.detectorName}-${searchImage}.jpeg`).then( resp => {
             return resp.blob()
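This hunk is the orphan-listener fix named in the commit title: without `once`, the 'load' handler stays registered after it fires, keeping the reader and its closure (including `resolve`) reachable. A self-contained sketch of the pattern, assuming a Blob like the fetch().blob() result above:

    function blobToDataURL (blob) {
      return new Promise(resolve => {
        const reader = new FileReader()
        // once:true unregisters the handler after the first 'load' event
        reader.addEventListener('load', () => resolve(reader.result), { once: true })
        reader.readAsDataURL(blob)
      })
    }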
@@ -408,7 +407,6 @@ import { Conv2DBackpropFilter } from '@tensorflow/tfjs'
         this.imageLoaded = true
         this.resultData = {}
         this.selectedChip = -1
-        this.imageView = new Image()
         this.imageView.src = imgData
         return(this.imageView.decode())
       }).then( () => {
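The imageView changes in the hunks above share one idea: keep a single long-lived Image and reset it through its src, which is what lets the template test `!imageView.src` instead of a null check. A sketch of the resulting load path:

    // reuse the component's one Image; decode() resolves when the new frame is ready to paint
    this.imageView.src = imgData
    await this.imageView.decode()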
@@ -1,7 +1,7 @@
 import * as tf from '@tensorflow/tfjs'
 import { f7 } from 'framework7-vue'
 
-var model = null
+let model = null
 
 export default {
   methods: {
@@ -9,7 +9,7 @@ export default {
     if (model && model.modelURL == weights) {
       return model
     } else if (model) {
-      model.dispose()
+      tf.dispose(model)
     }
     model = await tf.loadGraphModel(weights)
     const [modelWidth, modelHeight] = model.inputs[0].shape.slice(1, 3)
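Both forms exist in tfjs (GraphModel has its own dispose()); switching to the generic tf.dispose() keeps cleanup call sites uniform, since the same helper also accepts plain tensors, arrays of tensors, and null. For example:

    tf.dispose(model)               // an object holding tensors
    tf.dispose([tBoxes, tScores])   // an array of tensors
    tf.dispose(null)                // a safe no-op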
@@ -34,7 +34,8 @@ export default {
 
     console.time('run prediction')
     const res = model.predict(input)
-    const rawRes = tf.transpose(res,[0,2,1]).arraySync()[0]
+    const tRes = tf.transpose(res,[0,2,1])
+    const rawRes = tRes.arraySync()[0]
     console.timeEnd('run prediction')
 
     console.time('post-process')
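Binding the transpose result to tRes instead of chaining straight into arraySync() is what makes the later tf.dispose(tRes) possible; the chained form dropped the intermediate tensor without keeping a handle, leaking it on every prediction. For purely synchronous spans, tf.tidy is the usual alternative (a sketch; it applies here only because no await is involved):

    // tf.tidy auto-disposes every tensor created inside its callback
    const rawRes = tf.tidy(() => tf.transpose(res, [0, 2, 1]).arraySync()[0])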
@@ -59,6 +60,8 @@ export default {
     if (rawBoxes.length > 0) {
       const tBoxes = tf.tensor2d(rawBoxes)
       let tScores = null
+      let resBoxes = null
+      let validBoxes = []
       let structureScores = null
       let boxes_data = []
       let scores_data = []
@@ -66,8 +69,9 @@ export default {
       for (var c = 0; c < outputSize - 4; c++) {
         structureScores = rawScores.map(x => x[c])
         tScores = tf.tensor1d(structureScores)
-        var validBoxes = await tf.image.nonMaxSuppressionAsync(tBoxes,tScores,10,0.5,.05)
-        validBoxes = validBoxes.dataSync()
+        resBoxes = await tf.image.nonMaxSuppressionAsync(tBoxes,tScores,10,0.5,.05)
+        validBoxes = resBoxes.dataSync()
+        tf.dispose(resBoxes)
         if (validBoxes) {
           boxes_data.push(...rawBoxes.filter( (_, idx) => validBoxes.includes(idx)))
           var outputScores = structureScores.filter( (_, idx) => validBoxes.includes(idx))
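nonMaxSuppressionAsync allocates a fresh index tensor on every pass through the class loop; naming it resBoxes keeps that tensor handle distinct from the plain typed array dataSync() copies out, so it can be freed before the variable is overwritten. The per-iteration shape:

    // tensor in, typed array out, tensor freed before the next class
    resBoxes = await tf.image.nonMaxSuppressionAsync(tBoxes, tScores, 10, 0.5, .05)
    validBoxes = resBoxes.dataSync()   // Int32Array of surviving box indices
    tf.dispose(resBoxes)               // release the backing tensor immediately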
@@ -76,8 +80,10 @@ export default {
         }
       }
 
       validBoxes = []
       tf.dispose(tBoxes)
+      tf.dispose(tScores)
+      tf.dispose(tRes)
       const valid_detections_data = classes_data.length
       var output = {
         detections: []
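A quick way to confirm these disposals close the leak is tfjs's memory counter; the tensor count should return to its baseline after each detection:

    const before = tf.memory().numTensors
    // ... run one prediction + post-process ...
    console.log(tf.memory().numTensors - before)   // expect 0 once all temporaries are disposed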