Fix tensor cleanup and orphan event listener #171

Merged
jgeorgi merged 1 commit from xps-mem-leak into main 2024-04-12 02:25:03 +00:00
3 changed files with 18 additions and 12 deletions

View File

@@ -25,12 +25,14 @@ export default {
}, },
closeCamera () { closeCamera () {
this.cameraStream.getTracks().forEach( t => t.stop()) this.cameraStream.getTracks().forEach( t => t.stop())
this.cameraStream = null
this.videoAvailable = false this.videoAvailable = false
}, },
captureVidFrame() { captureVidFrame() {
const vidViewer = this.$refs.vid_viewer const vidViewer = this.$refs.vid_viewer
vidViewer.pause() vidViewer.pause()
let tempCVS = document.createElement('canvas') let tempCVS = document.createElement('canvas')
tempCVS.id = 'temp-video-canvas'
tempCVS.height = vidViewer.videoHeight || parseInt(vidViewer.style.height) tempCVS.height = vidViewer.videoHeight || parseInt(vidViewer.style.height)
tempCVS.width = vidViewer.videoWidth || parseInt(vidViewer.style.width) tempCVS.width = vidViewer.videoWidth || parseInt(vidViewer.style.width)
const tempCtx = tempCVS.getContext('2d') const tempCtx = tempCVS.getContext('2d')

View File

@@ -10,7 +10,7 @@
</f7-navbar> </f7-navbar>
<f7-block class="detect-grid"> <f7-block class="detect-grid">
<div class="image-container" ref="image_container"> <div class="image-container" ref="image_container">
<SvgIcon v-if="!imageView && !videoAvailable" :icon="f7route.params.region" fill-color="var(--avn-theme-color)" @click="selectImage" /> <SvgIcon v-if="!imageView.src && !videoAvailable" :icon="f7route.params.region" fill-color="var(--avn-theme-color)" @click="selectImage" />
<div class="vid-container" :style="`display: ${videoAvailable ? 'block' : 'none'}; position: absolute; width: 100%; height: 100%;`"> <div class="vid-container" :style="`display: ${videoAvailable ? 'block' : 'none'}; position: absolute; width: 100%; height: 100%;`">
<video id="vid-view" ref="vid_viewer" :srcObject="cameraStream" :autoPlay="true" style="width: 100%; height: 100%"></video> <video id="vid-view" ref="vid_viewer" :srcObject="cameraStream" :autoPlay="true" style="width: 100%; height: 100%"></video>
<f7-button @click="captureVidFrame()" style="position: absolute; bottom: 32px; left: 50%; transform: translateX(-50%); z-index: 3;" fill large>Capture</f7-button> <f7-button @click="captureVidFrame()" style="position: absolute; bottom: 32px; left: 50%; transform: translateX(-50%); z-index: 3;" fill large>Capture</f7-button>
@@ -120,7 +120,6 @@
import submitMixin from './submit-mixin' import submitMixin from './submit-mixin'
import detectionMixin from './detection-mixin' import detectionMixin from './detection-mixin'
import cameraMixin from './camera-mixin' import cameraMixin from './camera-mixin'
import { Conv2DBackpropFilter } from '@tensorflow/tfjs'
export default { export default {
mixins: [submitMixin, detectionMixin, cameraMixin], mixins: [submitMixin, detectionMixin, cameraMixin],
@@ -139,7 +138,7 @@ import { Conv2DBackpropFilter } from '@tensorflow/tfjs'
activeRegion: 4, activeRegion: 4,
classesList: [], classesList: [],
imageLoaded: false, imageLoaded: false,
imageView: null, imageView: new Image(),
imageLoadMode: "environment", imageLoadMode: "environment",
detecting: false, detecting: false,
detectPanel: false, detectPanel: false,
@@ -300,7 +299,7 @@ import { Conv2DBackpropFilter } from '@tensorflow/tfjs'
this.videoAvailable = await this.openCamera(this.$refs.image_container) this.videoAvailable = await this.openCamera(this.$refs.image_container)
if (this.videoAvailable) { if (this.videoAvailable) {
this.imageLoaded = false this.imageLoaded = false
this.imageView = null this.imageView.src = null
this.$refs.image_cvs.style['background-image'] = 'none' this.$refs.image_cvs.style['background-image'] = 'none'
this.resultData = {} this.resultData = {}
var trackDetails = this.cameraStream.getVideoTracks()[0].getSettings() var trackDetails = this.cameraStream.getVideoTracks()[0].getSettings()
@@ -384,11 +383,11 @@ import { Conv2DBackpropFilter } from '@tensorflow/tfjs'
this.detecting = true this.detecting = true
resolve('data:image/jpg;base64,' + searchImage) resolve('data:image/jpg;base64,' + searchImage)
} }
var reader = new FileReader() const reader = new FileReader()
reader.addEventListener("load", () => { reader.addEventListener("load", () => {
this.detecting = true this.detecting = true
resolve(reader.result) resolve(reader.result)
}) },{once: true})
if (this.imageLoadMode == 'sample') { if (this.imageLoadMode == 'sample') {
fetch(`${this.isCordova ? 'https://localhost' : '.'}/samples/${this.detectorName}-${searchImage}.jpeg`).then( resp => { fetch(`${this.isCordova ? 'https://localhost' : '.'}/samples/${this.detectorName}-${searchImage}.jpeg`).then( resp => {
return resp.blob() return resp.blob()
@@ -408,7 +407,6 @@ import { Conv2DBackpropFilter } from '@tensorflow/tfjs'
this.imageLoaded = true this.imageLoaded = true
this.resultData = {} this.resultData = {}
this.selectedChip = -1 this.selectedChip = -1
this.imageView = new Image()
this.imageView.src = imgData this.imageView.src = imgData
return(this.imageView.decode()) return(this.imageView.decode())
}).then( () => { }).then( () => {

View File

@@ -1,7 +1,7 @@
import * as tf from '@tensorflow/tfjs' import * as tf from '@tensorflow/tfjs'
import { f7 } from 'framework7-vue' import { f7 } from 'framework7-vue'
var model = null let model = null
export default { export default {
methods: { methods: {
@@ -9,7 +9,7 @@ export default {
if (model && model.modelURL == weights) { if (model && model.modelURL == weights) {
return model return model
} else if (model) { } else if (model) {
model.dispose() tf.dispose(model)
} }
model = await tf.loadGraphModel(weights) model = await tf.loadGraphModel(weights)
const [modelWidth, modelHeight] = model.inputs[0].shape.slice(1, 3) const [modelWidth, modelHeight] = model.inputs[0].shape.slice(1, 3)
@@ -34,7 +34,8 @@ export default {
console.time('run prediction') console.time('run prediction')
const res = model.predict(input) const res = model.predict(input)
const rawRes = tf.transpose(res,[0,2,1]).arraySync()[0] const tRes = tf.transpose(res,[0,2,1])
const rawRes = tRes.arraySync()[0]
console.timeEnd('run prediction') console.timeEnd('run prediction')
console.time('post-process') console.time('post-process')
@@ -59,6 +60,8 @@ export default {
if (rawBoxes.length > 0) { if (rawBoxes.length > 0) {
const tBoxes = tf.tensor2d(rawBoxes) const tBoxes = tf.tensor2d(rawBoxes)
let tScores = null let tScores = null
let resBoxes = null
let validBoxes = []
let structureScores = null let structureScores = null
let boxes_data = [] let boxes_data = []
let scores_data = [] let scores_data = []
@@ -66,8 +69,9 @@ export default {
for (var c = 0; c < outputSize - 4; c++) { for (var c = 0; c < outputSize - 4; c++) {
structureScores = rawScores.map(x => x[c]) structureScores = rawScores.map(x => x[c])
tScores = tf.tensor1d(structureScores) tScores = tf.tensor1d(structureScores)
var validBoxes = await tf.image.nonMaxSuppressionAsync(tBoxes,tScores,10,0.5,.05) resBoxes = await tf.image.nonMaxSuppressionAsync(tBoxes,tScores,10,0.5,.05)
validBoxes = validBoxes.dataSync() validBoxes = resBoxes.dataSync()
tf.dispose(resBoxes)
if (validBoxes) { if (validBoxes) {
boxes_data.push(...rawBoxes.filter( (_, idx) => validBoxes.includes(idx))) boxes_data.push(...rawBoxes.filter( (_, idx) => validBoxes.includes(idx)))
var outputScores = structureScores.filter( (_, idx) => validBoxes.includes(idx)) var outputScores = structureScores.filter( (_, idx) => validBoxes.includes(idx))
@@ -76,8 +80,10 @@ export default {
} }
} }
validBoxes = []
tf.dispose(tBoxes) tf.dispose(tBoxes)
tf.dispose(tScores) tf.dispose(tScores)
tf.dispose(tRes)
const valid_detections_data = classes_data.length const valid_detections_data = classes_data.length
var output = { var output = {
detections: [] detections: []