17 Commits

Author SHA1 Message Date
523b50ec65 Cleanup for 0.5.0 alpha release
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-08-21 15:48:55 -07:00
f35b28a7fb Parse model urls for full generalization
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-08-21 14:44:45 -07:00
94995a7a74 Enable vite preview script
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-08-21 11:55:55 -07:00
daf17bcdff Remove model root in favor of relative urls
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-08-20 17:34:46 -07:00
56a6d85f75 Get better model root using import.meta
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-08-20 17:06:47 -07:00
46b5ba7d6e Fix root of model urls
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-08-15 16:48:50 -07:00
401e5831c7 Fallback to non worker tfjs when on Safari (#193)
Safari's worker limitations mean that detection threads in the worker barely function. Until Apple quits being whiny jerks about PWAs, this workaround is required to bypass the message calls to the workers and use the old single-threaded system when Safari is detected (a rough sketch of the fallback follows this entry).

Reviewed-on: #193
2024-08-15 22:43:19 +00:00
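A minimal sketch of that fallback, assuming the window.safari check and isSafari flag visible in the diff further down; the helper names runDetection, modelLocation, and detectWorker are illustrative, not the exact component code:

// Safari exposes a global `window.safari` object; flag it in the store at startup.
if (window.safari !== undefined) { store().safariDetected() }

// At detection time, skip the worker on Safari and run tfjs on the main thread.
async function runDetection(image) {
  if (store().isSafari) {
    await loadModel(modelLocation, true)   // old single-threaded path
    return localDetect(image)
  }
  // Otherwise hand the image off to the dedicated worker as a transferable bitmap.
  const bitmap = await createImageBitmap(image)
  detectWorker.postMessage({ call: 'localDetect', image: bitmap }, [bitmap])
}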
7a19b9c43c Make worker inline
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-07-30 20:38:14 -07:00
ec1fc6d28d Fix info label crash
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-07-28 21:40:13 -07:00
7b800d6b39 Fix camera crash when structure selected
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-07-28 20:19:46 -07:00
1a703b0100 Switch shared worker to basic service worker (#191)
Shared workers seem to cause problems on iOS (and sharing wasn't really required anyway), so this PR changes the shared workers to non-shared workers. As a benefit, it preloads the full and video models simultaneously, which improves performance when starting the video and running post-video detection (see the sketch after this entry).

Signed-off-by: Justin Georgi <justin.georgi@gmail.com>

Reviewed-on: #191
2024-07-29 00:54:15 +00:00
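In rough terms (a simplified sketch, not the exact component code; detectionWorker, modelLocation, and miniLocation follow the names in the diff below), the change swaps the SharedWorker port plumbing for plain dedicated workers and starts both model loads in parallel:

// Before: one SharedWorker, messaged through its port.
// const worker = new SharedWorker('../assets/detect-worker.js', { type: 'module' })
// worker.port.postMessage({ call: 'loadModel', weights: modelLocation, preload: true })

// After: two dedicated workers, one per model, preloaded at the same time.
const detectWorker = new detectionWorker()   // full detection model
const vidWorker = new detectionWorker()      // smaller video ("mini") model
detectWorker.postMessage({ call: 'loadModel', weights: modelLocation, preload: true })
vidWorker.postMessage({ call: 'loadModel', weights: miniLocation, preload: true })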
0fab2da693 Remove click action from main detection svg icon
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-07-25 17:16:31 -07:00
0d96174279 Clean up shared worker changes
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-07-25 17:07:16 -07:00
6ab643a16f Change vite shared worker to query suffix
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-07-25 17:03:32 -07:00
3f0860534d Move worker url call to detect page create
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-07-25 16:27:45 -07:00
1d4f8c8ecd Fix shared worker for vite build (#188)
This PR uses the Vite-recommended method of calling the shared worker so that the PWA builds properly (see the sketch after this entry).

Signed-off-by: Justin Georgi <justin.georgi@gmail.com>

Reviewed-on: #188
2024-07-25 23:09:51 +00:00
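For context, Vite only bundles a worker script when it is imported through a worker query suffix (the shared-worker variant presumably used ?sharedworker); later commits in this range land on the inline form visible in the diff below. A sketch, with modelLocation standing in for the component's model URL:

// Vite worker import: the query suffix tells the bundler to emit the script
// as a worker entry (the `&inline` variant embeds it as a blob instead of a separate chunk).
import detectionWorker from '@/assets/detect-worker.js?worker&inline'

// The imported binding is a constructor for a ready-to-use Worker.
const detectWorker = new detectionWorker()
detectWorker.postMessage({ call: 'loadModel', weights: modelLocation, preload: true })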
8cdded7617 Add detection worker (#187)
Closes: #186

This PR shifts much of the TensorFlow work to a shared worker for better multithreaded performance (the message protocol is sketched after this entry).

Reviewed-on: #187
2024-07-25 17:56:21 +00:00
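The message protocol that grew out of this work is visible in the worker diff below: the page posts an object with a call field and the worker answers with either a success payload or an error/message pair. Condensed from the final, non-shared form of the worker source:

// detect-worker.js, condensed: dispatch on the `call` field of each message.
onmessage = function (e) {
  switch (e.data.call) {
    case 'loadModel':
      loadModel(e.data.weights, e.data.preload)
        .then(() => postMessage({ success: 'model' }))
        .catch((err) => postMessage({ error: true, message: err.message }))
      break
    case 'localDetect':
      localDetect(e.data.image)
        .then((dets) => postMessage({ success: 'detection', detections: dets }))
        .catch((err) => postMessage({ error: true, message: err.message }))
      e.data.image.close()   // release the transferred ImageBitmap
      break
  }
}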
10 changed files with 302 additions and 90 deletions

View File

@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='utf-8'?>
<widget id="edu.midwestern.alvinn" version="0.5.0-rc" xmlns="http://www.w3.org/ns/widgets" xmlns:cdv="http://cordova.apache.org/ns/1.0" xmlns:android="http://schemas.android.com/apk/res/android">
<widget id="edu.midwestern.alvinn" version="0.5.0-alpha" xmlns="http://www.w3.org/ns/widgets" xmlns:cdv="http://cordova.apache.org/ns/1.0" xmlns:android="http://schemas.android.com/apk/res/android">
<name>ALVINN</name>
<description>Anatomy Lab Visual Identification Neural Network.</description>
<author email="jgeorg@midwestern.edu" href="https://midwestern.edu">

View File

@@ -1,7 +1,7 @@
{
"name": "edu.midwestern.alvinn",
"displayName": "ALVINN",
"version": "0.5.0-rc",
"version": "0.5.0-alpha",
"description": "Anatomy Lab Visual Identification Neural Network.",
"main": "index.js",
"scripts": {

View File

@@ -1,7 +1,7 @@
{
"name": "alvinn",
"private": true,
"version": "0.5.0-rc",
"version": "0.5.0-alpha",
"description": "ALVINN",
"repository": "",
"license": "UNLICENSED",
@@ -14,7 +14,8 @@
"cordova-ios": "cross-env TARGET=cordova cross-env NODE_ENV=production vite build && node ./build/build-cordova.js && cd cordova && cordova run ios",
"build-cordova-android": "cross-env TARGET=cordova cross-env NODE_ENV=production vite build && node ./build/build-cordova.js && cd cordova && cordova build android",
"cordova-android": "cross-env TARGET=cordova cross-env NODE_ENV=production vite build && node ./build/build-cordova.js && cd cordova && cordova run android",
"postinstall": "cpy --flat ./node_modules/framework7-icons/fonts/*.* ./src/fonts/"
"postinstall": "cpy --flat ./node_modules/framework7-icons/fonts/*.* ./src/fonts/",
"preview": "vite preview"
},
"browserslist": [
"IOS >= 15",

View File

@@ -1,45 +1,39 @@
import * as tf from '@tensorflow/tfjs'
import { f7 } from 'framework7-vue'
let model = null
self.onconnect = (e) => {
const port = e.ports[0];
port.onmessage = function (e) {
onmessage = function (e) {
switch (e.data.call) {
case 'loadModel':
loadModel('.' + e.data.weights,e.data.preload).then(() => {
port.postMessage({success: 'model'})
loadModel(e.data.weights,e.data.preload).then(() => {
postMessage({success: 'model'})
}).catch((err) => {
port.postMessage({error: true, message: err.message})
postMessage({error: true, message: err.message})
})
break
case 'localDetect':
localDetect(e.data.image).then((dets) => {
port.postMessage({success: 'detection', detections: dets})
postMessage({success: 'detection', detections: dets})
}).catch((err) => {
port.postMessage({error: true, message: err.message})
//throw (err)
postMessage({error: true, message: err.message})
})
e.data.image.close()
break
case 'videoFrame':
videoFrame(e.data.image).then((frameDet) =>{
port.postMessage({succes: 'frame', coords: frameDet.cds, modelWidth: frameDet.mW, modelHeight: frameDet.mH})
postMessage({succes: 'frame', coords: frameDet.cds, modelWidth: frameDet.mW, modelHeight: frameDet.mH})
}).catch((err) => {
port.postMessage({error: true, message: err.message})
postMessage({error: true, message: err.message})
})
e.data.image.close()
break
default:
console.log('Worker message incoming:')
console.log(e)
port.postMessage({result1: 'First result', result2: 'Second result'})
postMessage({result1: 'First result', result2: 'Second result'})
break
}
}
port.start()
}
async function loadModel(weights, preload) {
@@ -63,7 +57,7 @@ async function loadModel(weights, preload) {
}
async function localDetect(imageData) {
console.time('pre-process')
console.time('sw: pre-process')
const [modelWidth, modelHeight] = model.inputs[0].shape.slice(1, 3)
let gTense = null
const input = tf.tidy(() => {
@@ -71,15 +65,15 @@ async function localDetect(imageData) {
return tf.concat([gTense,gTense,gTense],3)
})
tf.dispose(gTense)
console.timeEnd('pre-process')
console.timeEnd('sw: pre-process')
console.time('run prediction')
console.time('sw: run prediction')
const res = model.predict(input)
const tRes = tf.transpose(res,[0,2,1])
const rawRes = tRes.arraySync()[0]
console.timeEnd('run prediction')
console.timeEnd('sw: run prediction')
console.time('post-process')
console.time('sw: post-process')
const outputSize = res.shape[1]
let rawBoxes = []
let rawScores = []
@@ -144,14 +138,14 @@ async function localDetect(imageData) {
}
tf.dispose(res)
tf.dispose(input)
console.timeEnd('post-process')
console.timeEnd('sw: post-process')
return output || { detections: [] }
}
async function videoFrame (vidData) {
const [modelWidth, modelHeight] = model.inputs[0].shape.slice(1, 3)
console.time('frame-process')
console.time('sw: frame-process')
let rawCoords = []
try {
const input = tf.tidy(() => {
@@ -177,6 +171,6 @@ async function videoFrame (vidData) {
} catch (e) {
console.log(e)
}
console.timeEnd('frame-process')
console.timeEnd('sw: frame-process')
return {cds: rawCoords, mW: modelWidth, mH: modelHeight}
}

View File

@@ -79,6 +79,7 @@
.then((mod) => { return mod.text() })
this.siteConf = YAML.parse(confText)
}
if (window.safari !== undefined) {store().safariDetected()}
const loadSiteSettings = localStorage.getItem('siteSettings')
if (loadSiteSettings) {
let loadedSettings = JSON.parse(loadSiteSettings)

View File

@@ -4,12 +4,13 @@ const state = reactive({
disclaimerAgreement: false,
enabledRegions: ['thorax','abdomen','limbs','head'],
regionIconSet: Math.floor(Math.random() * 3) + 1,
version: '0.5.0-rc',
version: '0.5.0-alpha',
fullscreen: false,
useExternal: 'optional',
siteDemo: false,
externalServerList: [],
infoUrl: false
infoUrl: false,
safariBrowser: false
})
const set = (config, confObj) => {
@@ -21,6 +22,10 @@ const agree = () => {
state.disclaimerAgreement = true
}
const safariDetected = () => {
state.safariBrowser = true
}
const getServerList = () => {
if (state.useExternal == 'required') {
return state.externalServerList[0]
@@ -50,8 +55,10 @@ export default () => ({
getVersion: computed(() => state.version),
getIconSet: computed(() => state.regionIconSet),
getInfoUrl: computed(() => state.infoUrl),
isSafari: computed(() => state.safariBrowser),
set,
agree,
safariDetected,
getServerList,
toggleFullscreen
})

View File

@@ -41,16 +41,20 @@ export default {
tempCtx.drawImage(vidViewer, 0, 0)
this.getImage(tempCVS.toDataURL())
},
async videoFrameDetect (vidData) {
const vidWorker = new SharedWorker('../assets/detect-worker.js',{type: 'module'})
vidWorker.port.onmessage = (eVid) => {
self = this
async videoFrameDetectWorker (vidData) {
const startDetection = () => {
createImageBitmap(vidData).then(imVideoFrame => {
this.vidWorker.postMessage({call: 'videoFrame', image: imVideoFrame}, [imVideoFrame])
})
}
vidData.addEventListener('resize',startDetection,{once: true})
this.vidWorker.onmessage = (eVid) => {
if (eVid.data.error) {
console.log(eVid.data.message)
f7.dialog.alert(`ALVINN AI model error: ${eVid.data.message}`)
} else if (this.videoAvailable) {
createImageBitmap(vidData).then(imVideoFrame => {
vidWorker.port.postMessage({call: 'videoFrame', image: imVideoFrame}, [imVideoFrame])
this.vidWorker.postMessage({call: 'videoFrame', image: imVideoFrame}, [imVideoFrame])
})
if (eVid.data.coords) {
imageCtx.clearRect(0,0,imCanvas.width,imCanvas.height)
@@ -65,7 +69,6 @@ export default {
}
}
vidWorker.port.postMessage({call: 'loadModel', weights: this.miniLocation, preload: true})
const imCanvas = this.$refs.image_cvs
const imageCtx = imCanvas.getContext("2d")
const target = this.$refs.target_image

View File

@@ -10,7 +10,7 @@
</f7-navbar>
<f7-block class="detect-grid">
<div class="image-container" ref="image_container">
<SvgIcon v-if="!imageView.src && !videoAvailable" :icon="f7route.params.region" fill-color="var(--avn-theme-color)" @click="selectImage" />
<SvgIcon v-if="!imageView.src && !videoAvailable" :icon="f7route.params.region" fill-color="var(--avn-theme-color)"/>
<div class="vid-container" :style="`display: ${videoAvailable ? 'block' : 'none'}; position: absolute; width: 100%; height: 100%;`">
<video id="vid-view" ref="vid_viewer" :srcObject="cameraStream" :autoPlay="true" style="width: 100%; height: 100%"></video>
<f7-button @click="captureVidFrame()" style="position: absolute; bottom: 32px; left: 50%; transform: translateX(-50%); z-index: 3;" fill large>Capture</f7-button>
@@ -140,6 +140,8 @@
import detectionMixin from './detection-mixin'
import cameraMixin from './camera-mixin'
import detectionWorker from '@/assets/detect-worker.js?worker&inline'
export default {
mixins: [submitMixin, detectionMixin, cameraMixin],
props: {
@@ -178,7 +180,8 @@
videoAvailable: false,
cameraStream: null,
infoLinkPos: {},
workerScript: null
detectWorker: null,
vidWorker: null
}
},
setup() {
@@ -187,7 +190,6 @@
created () {
let loadOtherSettings = localStorage.getItem('otherSettings')
if (loadOtherSettings) this.otherSettings = JSON.parse(loadOtherSettings)
let modelRoot = this.isCordova ? 'https://localhost' : '.'
this.detectorName = this.f7route.params.region
switch (this.detectorName) {
case 'thorax':
@@ -203,9 +205,9 @@
this.activeRegion = 3
break;
}
this.modelLocation = `${modelRoot}/models/${this.detectorName}${this.otherSettings.mini ? '-mini' : ''}/model.json`
this.miniLocation = `${modelRoot}/models/${this.detectorName}-mini/model.json`
fetch(`${modelRoot}/models/${this.detectorName}/classes.json`)
this.modelLocation = URL.parse(`../models/${this.detectorName}${this.otherSettings.mini ? '-mini' : ''}/model.json`,import.meta.url).href
this.miniLocation = URL.parse(`../models/${this.detectorName}-mini/model.json`,import.meta.url).href
fetch(URL.parse(`../models/${this.detectorName}/classes.json`,import.meta.url).href)
.then((mod) => { return mod.json() })
.then((classes) => {
this.classesList = classes
@@ -215,8 +217,8 @@
if (loadServerSettings) this.serverSettings = JSON.parse(loadServerSettings)
},
mounted () {
const mountWorker = new SharedWorker('../assets/detect-worker.js',{type: 'module'})
mountWorker.port.onmessage = (eMount) => {
this.detectWorker = new detectionWorker()
this.detectWorker.onmessage = (eMount) => {
self = this
if (eMount.data.error) {
console.log(eMount.data.message)
@@ -224,13 +226,32 @@
}
self.modelLoading = false
}
this.vidWorker = new detectionWorker()
this.vidWorker.onmessage = (eMount) => {
self = this
if (eMount.data.error) {
console.log(eMount.data.message)
f7.dialog.alert(`ALVINN AI nano model error: ${eMount.data.message}`)
}
}
if (this.serverSettings && this.serverSettings.use) {
this.getRemoteLabels()
this.modelLoading = false
} else {
this.modelLoading = true
mountWorker.port.postMessage({call: 'loadModel', weights: this.modelLocation, preload: true})
if (this.isSafari) {
this.loadModel(this.modelLocation, true).then(() => {
this.modelLoading = false
}).catch((e) => {
console.log(e.message)
f7.dialog.alert(`ALVINN AI model error: ${e.message}`)
this.modelLoading = false
})
} else {
this.detectWorker.postMessage({call: 'loadModel', weights: this.modelLocation, preload: true})
this.vidWorker.postMessage({call: 'loadModel', weights: this.miniLocation, preload: true})
}
}
window.onresize = (e) => { if (this.$refs.image_cvs) this.selectChip('redraw') }
},
@@ -283,7 +304,7 @@
infoLinkTarget () {
if (!this.getInfoUrl) return ''
let structure = this.showResults.find( r => r.resultIndex == this.selectedChip)
return this.getInfoUrl + structure.label.replaceAll(' ','_')
return structure ? this.getInfoUrl + structure.label.replaceAll(' ','_') : ''
}
},
methods: {
@@ -292,12 +313,12 @@
return `--chip-media-gradient: conic-gradient(from ${270 - (confFactor * 360 / 2)}deg, hsl(${confFactor * 120}deg, 100%, 50%) ${confFactor}turn, hsl(${confFactor * 120}deg, 50%, 66%) ${confFactor}turn)`
},
async setData () {
const detectWorker = new SharedWorker('../assets/detect-worker.js',{type: 'module'})
detectWorker.port.onmessage = (eDetect) => {
this.detectWorker.onmessage = (eDetect) => {
self = this
if (eDetect.data.error) {
self.detecting = false
self.resultData = {}
loadFailure()
f7.dialog.alert(`ALVINN structure finding error: ${eDetect.data.message}`)
} else if (eDetect.data.success == 'detection') {
self.detecting = false
@@ -307,28 +328,45 @@
}
self.uploadDirty = true
} else if (eDetect.data.success == 'model') {
this.reloadModel = false
loadSuccess(true)
self.reloadModel = false
loadSuccess()
}
}
let loadSuccess = null
let loadFailure = null
let modelReloading = new Promise((res, rej) => {
let modelReloading = null
if (this.isSafari && this.reloadModel) {
await this.loadModel(this.modelLocation)
this.reloadModel = false
} else {
modelReloading = new Promise((res, rej) => {
loadSuccess = res
loadFailure = rej
if (this.reloadModel) {
detectWorker.port.postMessage({call: 'loadModel', weights: this.modelLocation})
this.detectWorker.postMessage({call: 'loadModel', weights: this.modelLocation})
} else {
loadSuccess(true)
loadSuccess()
}
})
}
if (this.serverSettings && this.serverSettings.use) {
this.remoteDetect()
} else {
} else if (!this.isSafari) {
Promise.all([modelReloading,createImageBitmap(this.imageView)]).then(res => {
detectWorker.port.postMessage({call: 'localDetect', image: res[1]}, [res[1]])
this.detectWorker.postMessage({call: 'localDetect', image: res[1]}, [res[1]])
})
} else {
this.localDetect(this.imageView).then(dets => {
this.detecting = false
this.resultData = dets
this.uploadDirty = true
}).catch((e) => {
console.log(e.message)
this.detecting = false
this.resultData = {}
f7.dialog.alert(`ALVINN structure finding error: ${e.message}`)
})
}
},
@@ -345,9 +383,10 @@
navigator.camera.getPicture(this.getImage, this.onFail, { quality: 50, destinationType: Camera.DestinationType.DATA_URL, correctOrientation: true });
return
}
if (mode == "camera") {
if (mode == "camera" && !this.otherSettings.disableVideo) {
this.videoAvailable = await this.openCamera(this.$refs.image_container)
if (this.videoAvailable) {
this.selectedChip = -1
this.imageLoaded = false
this.imageView.src = null
this.$refs.image_cvs.style['background-image'] = 'none'
@@ -356,8 +395,10 @@
var vidElement = this.$refs.vid_viewer
vidElement.width = trackDetails.width
vidElement.height = trackDetails.height
if (!this.otherSettings.disableVideo) {
if (this.isSafari) {
this.videoFrameDetect(vidElement)
} else {
this.videoFrameDetectWorker(vidElement)
}
return
}
@@ -471,13 +512,9 @@
}).then( () => {
const [imCanvas, _] = this.resetView()
imCanvas.style['background-image'] = `url(${this.imageView.src})`
/******
* setTimeout is not a good solution, but it's the only way
* I can find to not cut off drawing of the canvas background
******/
// setTimeout(() => {
f7.utils.nextFrame(() => {
this.setData()
// }, 1)
})
}).catch((e) => {
console.log(e.message)
f7.dialog.alert(`Error loading image: ${e.message}`)

View File

@@ -1,7 +1,114 @@
import * as tf from '@tensorflow/tfjs'
import { f7 } from 'framework7-vue'
let model = null
export default {
methods: {
async loadModel(weights, preload) {
if (model && model.modelURL == weights) {
return model
} else if (model) {
tf.dispose(model)
}
model = await tf.loadGraphModel(weights)
const [modelWidth, modelHeight] = model.inputs[0].shape.slice(1, 3)
/*****************
* If preloading then run model
* once on fake data to preload
* weights for a faster response
*****************/
if (preload) {
const dummyT = tf.ones([1,modelWidth,modelHeight,3])
model.predict(dummyT)
}
return model
},
async localDetect(imageData) {
console.time('mx: pre-process')
const [modelWidth, modelHeight] = model.inputs[0].shape.slice(1, 3)
let gTense = null
const input = tf.tidy(() => {
gTense = tf.image.rgbToGrayscale(tf.image.resizeBilinear(tf.browser.fromPixels(imageData), [modelWidth, modelHeight])).div(255.0).expandDims(0)
return tf.concat([gTense,gTense,gTense],3)
})
tf.dispose(gTense)
console.timeEnd('mx: pre-process')
console.time('mx: run prediction')
const res = model.predict(input)
const tRes = tf.transpose(res,[0,2,1])
const rawRes = tRes.arraySync()[0]
console.timeEnd('mx: run prediction')
console.time('mx: post-process')
const outputSize = res.shape[1]
let rawBoxes = []
let rawScores = []
for (var i = 0; i < rawRes.length; i++) {
var getScores = rawRes[i].slice(4)
if (getScores.every( s => s < .05)) { continue }
var getBox = rawRes[i].slice(0,4)
var boxCalc = [
(getBox[0] - (getBox[2] / 2)) / modelWidth,
(getBox[1] - (getBox[3] / 2)) / modelHeight,
(getBox[0] + (getBox[2] / 2)) / modelWidth,
(getBox[1] + (getBox[3] / 2)) / modelHeight,
]
rawBoxes.push(boxCalc)
rawScores.push(getScores)
}
if (rawBoxes.length > 0) {
const tBoxes = tf.tensor2d(rawBoxes)
let tScores = null
let resBoxes = null
let validBoxes = []
let structureScores = null
let boxes_data = []
let scores_data = []
let classes_data = []
for (var c = 0; c < outputSize - 4; c++) {
structureScores = rawScores.map(x => x[c])
tScores = tf.tensor1d(structureScores)
resBoxes = await tf.image.nonMaxSuppressionAsync(tBoxes,tScores,10,0.5,.05)
validBoxes = resBoxes.dataSync()
tf.dispose(resBoxes)
if (validBoxes) {
boxes_data.push(...rawBoxes.filter( (_, idx) => validBoxes.includes(idx)))
var outputScores = structureScores.filter( (_, idx) => validBoxes.includes(idx))
scores_data.push(...outputScores)
classes_data.push(...outputScores.fill(c))
}
}
validBoxes = []
tf.dispose(tBoxes)
tf.dispose(tScores)
tf.dispose(tRes)
const valid_detections_data = classes_data.length
var output = {
detections: []
}
for (var i =0; i < valid_detections_data; i++) {
var [dLeft, dTop, dRight, dBottom] = boxes_data[i]
output.detections.push({
"top": dTop,
"left": dLeft,
"bottom": dBottom,
"right": dRight,
"label": this.detectorLabels[classes_data[i]].name,
"confidence": scores_data[i] * 100
})
}
}
tf.dispose(res)
tf.dispose(input)
console.timeEnd('mx: post-process')
return output || { detections: [] }
},
getRemoteLabels() {
var self = this
var modelURL = `http://${this.serverSettings.address}:${this.serverSettings.port}/detectors`
@@ -65,5 +172,65 @@ export default {
this.detecting = false
f7.dialog.alert('No connection to remote ALVINN instance. Please check app settings.')
},
async videoFrameDetect (vidData) {
await this.loadModel(this.miniLocation)
const [modelWidth, modelHeight] = model.inputs[0].shape.slice(1, 3)
const imCanvas = this.$refs.image_cvs
const imageCtx = imCanvas.getContext("2d")
const target = this.$refs.target_image
await tf.nextFrame();
imCanvas.width = imCanvas.clientWidth
imCanvas.height = imCanvas.clientHeight
imageCtx.clearRect(0,0,imCanvas.width,imCanvas.height)
var imgWidth
var imgHeight
const imgAspect = vidData.width / vidData.height
const rendAspect = imCanvas.width / imCanvas.height
if (imgAspect >= rendAspect) {
imgWidth = imCanvas.width
imgHeight = imCanvas.width / imgAspect
} else {
imgWidth = imCanvas.height * imgAspect
imgHeight = imCanvas.height
}
while (this.videoAvailable) {
console.time('mx: frame-process')
try {
const input = tf.tidy(() => {
return tf.image.resizeBilinear(tf.browser.fromPixels(vidData), [modelWidth, modelHeight]).div(255.0).expandDims(0)
})
const res = model.predict(input)
const rawRes = tf.transpose(res,[0,2,1]).arraySync()[0]
let rawCoords = []
if (rawRes) {
for (var i = 0; i < rawRes.length; i++) {
let getScores = rawRes[i].slice(4)
if (getScores.some( s => s > .5)) {
let foundTarget = rawRes[i].slice(0,2)
foundTarget.push(Math.max(...getScores))
rawCoords.push(foundTarget)
}
}
imageCtx.clearRect(0,0,imCanvas.width,imCanvas.height)
for (var coord of rawCoords) {
console.log(`x: ${coord[0]}, y: ${coord[1]}`)
let pointX = (imCanvas.width - imgWidth) / 2 + (coord[0] / modelWidth) * imgWidth -5
let pointY = (imCanvas.height - imgHeight) / 2 + (coord[1] / modelHeight) * imgHeight -5
imageCtx.globalAlpha = coord[2]
imageCtx.drawImage(target, pointX, pointY, 20, 20)
}
}
tf.dispose(input)
tf.dispose(res)
tf.dispose(rawRes)
} catch (e) {
console.log(e)
}
console.timeEnd('mx: frame-process')
await tf.nextFrame();
}
}
}
}

View File

@@ -8,6 +8,7 @@
<f7-block-title medium>Details</f7-block-title>
<f7-list>
<f7-list-item title="Version" :after="alvinnVersion"></f7-list-item>
<f7-list-item v-if="isSafari" title="Safari" after="Workers disabled"></f7-list-item>
</f7-list>
<f7-block-title medium>Models</f7-block-title>
<f7-list style="width: 100%;">
@@ -52,6 +53,7 @@
miniHeadneckDetails: {},
alvinnVersion: store().getVersion,
isCordova: !!window.cordova,
isSafari: store().isSafari,
otherSettings: {}
}
},