diff --git a/src/assets/target.svg b/src/assets/target.svg
new file mode 100644
index 0000000..0c71e01
--- /dev/null
+++ b/src/assets/target.svg
@@ -0,0 +1,14 @@
+<!-- target marker icon (SVG markup omitted) -->
diff --git a/src/pages/camera-mixin.js b/src/pages/camera-mixin.js
index 97e9a07..c801246 100644
--- a/src/pages/camera-mixin.js
+++ b/src/pages/camera-mixin.js
@@ -1,3 +1,5 @@
+import * as tf from '@tensorflow/tfjs'
+
export default {
methods: {
async openCamera(imContain) {
@@ -5,7 +7,6 @@ export default {
const devicesList = await navigator.mediaDevices.enumerateDevices()
this.videoDeviceAvailable = devicesList.some( d => d.kind == "videoinput")
if (this.videoDeviceAvailable) {
- navigator.mediaDevices.getUserMedia({video: true})
var vidConstraint = {
video: {
width: {
@@ -38,8 +39,23 @@ export default {
tempCtx.drawImage(vidViewer, 0, 0)
this.getImage(tempCVS.toDataURL())
},
- async videoStream() {
- //TODO
+    async videoStream() {
+ const vidElement = this.$refs.vid_viewer
+ this.videoAvailable = false
+ const devicesList = await navigator.mediaDevices.enumerateDevices()
+ this.videoDeviceAvailable = devicesList.some( d => d.kind == "videoinput")
+ if (this.videoDeviceAvailable) {
+ var vidConstraint = {
+ //resizeWidth: 640,
+ //resizeHeight: 640,
+ facingMode: 'environment'
+ }
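+        // tf.data.webcam attaches the camera stream to the given <video>
+        // element and resolves once the stream is ready; facingMode is
+        // forwarded to getUserMedia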
+        tf.data.webcam(vidElement, vidConstraint).then( () => {
+          this.videoAvailable = true
+          this.cameraStream = vidElement.captureStream()
+          // videoFrameDetect reads pixels from the <video> element itself,
+          // not from the webcam iterator the promise resolves with
+          this.videoFrameDetect(vidElement)
+        })
+ }
}
}
}
\ No newline at end of file
diff --git a/src/pages/detect.vue b/src/pages/detect.vue
index 33a86c3..dec4a40 100644
--- a/src/pages/detect.vue
+++ b/src/pages/detect.vue
@@ -163,7 +164,6 @@
case 'thorax':
this.activeRegion = 0
this.detectorName = 'thorax'
- //this.classesList = thoraxClasses
/* VITE setting */
this.modelLocation = `../models/thorax${this.otherSettings.mini ? '-mini' : ''}/model.json`
/* PWA Build setting */
@@ -172,8 +172,13 @@
break;
case 'abdomen':
this.activeRegion = 1
- this.detectorName = 'combined'
- break;
+ this.detectorName = 'abdomen'
+ /* VITE setting */
+ this.modelLocation = `../models/abdomen${this.otherSettings.mini ? '-mini' : ''}/model.json`
+ /* PWA Build setting */
+ //this.modelLocation = `./models/abdomen${this.otherSettings.mini ? '-mini' : ''}/model.json`
+ this.modelLocationCordova = `https://localhost/models/abdomen${this.otherSettings.mini ? '-mini' : ''}/model.json`
+ break;
case 'limbs':
this.activeRegion = 2
this.detectorName = 'defaultNew'
@@ -284,7 +289,14 @@
}
if (mode == "camera") {
this.videoAvailable = await this.openCamera(this.$refs.image_container)
- if (this.videoAvailable) { return }
+ if (this.videoAvailable) {
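+          // match the <video> element's size to the camera track's native
+          // resolution so captured frames and the overlay use the same geometry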
+ var trackDetails = this.cameraStream.getVideoTracks()[0].getSettings()
+ var vidElement = this.$refs.vid_viewer
+ vidElement.width = trackDetails.width
+ vidElement.height = trackDetails.height
+ this.videoFrameDetect(vidElement)
+ return
+ }
}
if (mode == 'sample') {
f7.dialog.create({
@@ -421,7 +433,7 @@
this.selectChip(findBox >= 0 ? this.resultData.detections[findBox].resultIndex : this.selectedChip)
},
box2cvs(boxInput) {
- if (!boxInput) return []
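+      // treat null/undefined and empty arrays alike: nothing to draw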
+ if (!boxInput || boxInput.length == 0) return []
const boxList = boxInput.length ? boxInput : [boxInput]
const [imCanvas, imageCtx] = this.resetView()
var imgWidth
diff --git a/src/pages/detection-mixin.js b/src/pages/detection-mixin.js
index 46eb871..7de057a 100644
--- a/src/pages/detection-mixin.js
+++ b/src/pages/detection-mixin.js
@@ -150,7 +152,59 @@ export default {
remoteTimeout () {
this.detecting = false
f7.dialog.alert('No connection to remote ALVINN instance. Please check app settings.')
- }
+ },
+ async videoFrameDetect (vidData) {
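+      // Live-detection loop: pulls frames from the <video> element, runs the
+      // detector, and overlays a target marker on each confident detection
+      // until this.videoAvailable is cleared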
+      if (!model) { return } // bail out if the detector hasn't finished loading
+      const [modelWidth, modelHeight] = model.inputs[0].shape.slice(1, 3)
+ const imCanvas = this.$refs.image_cvs
+ const imageCtx = imCanvas.getContext("2d")
+ const target = this.$refs.target_image
+      await tf.nextFrame()
+ imCanvas.width = imCanvas.clientWidth
+ imCanvas.height = imCanvas.clientHeight
+ imageCtx.clearRect(0,0,imCanvas.width,imCanvas.height)
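+      // aspect-fit: compute the letterboxed size of the video frame inside the canvas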
+ var imgWidth
+ var imgHeight
+ const imgAspect = vidData.clientWidth / vidData.clientHeight
+ const rendAspect = imCanvas.width / imCanvas.height
+ if (imgAspect >= rendAspect) {
+ imgWidth = imCanvas.width
+ imgHeight = imCanvas.width / imgAspect
+ } else {
+ imgWidth = imCanvas.height * imgAspect
+ imgHeight = imCanvas.height
+ }
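+      // process frames until the stream stops; console.time logs per-frame latency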
+ while (this.videoAvailable) {
+ console.time('frame-process')
+ try {
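+          // grab the current frame, resize to the model input, scale to [0,1], add a batch dim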
+ const input = tf.tidy(() => {
+ return tf.image.resizeBilinear(tf.browser.fromPixels(vidData), [modelWidth, modelHeight]).div(255.0).expandDims(0)
+ })
+          const res = model.predict(input)
+          // swap the last two axes so each row of rawRes is one candidate box
+          const transRes = tf.transpose(res, [0,2,1])
+          const rawRes = transRes.arraySync()[0]
+          // free per-frame tensors explicitly, otherwise the loop leaks GPU memory
+          tf.dispose([input, res, transRes])
+ let rawCoords = []
+ if (rawRes) {
+            for (var i = 0; i < rawRes.length; i++) {
+              // entries from index 4 onward are the per-class confidence scores
+              var getScores = rawRes[i].slice(4)
+              if (getScores.some( s => s > .5)) {
+                // keep the box centre (x, y) for the overlay marker
+                rawCoords.push(rawRes[i].slice(0,2))
+              }
+            }
+
+ imageCtx.clearRect(0,0,imCanvas.width,imCanvas.height)
+            for (var coord of rawCoords) {
+              // map model-space coords into the letterboxed canvas, offset by
+              // half the marker size so the 20x20 icon is centred on the point
+              let pointX = (imCanvas.width - imgWidth) / 2 + (coord[0] / modelWidth) * imgWidth - 10
+              let pointY = (imCanvas.height - imgHeight) / 2 + (coord[1] / modelHeight) * imgHeight - 10
+              imageCtx.drawImage(target, pointX, pointY, 20, 20)
+            }
+ }
+ } catch (e) {
+ console.log(e)
+ }
+ console.timeEnd('frame-process')
+        // yield so the browser can paint before processing the next frame
+        await tf.nextFrame()
+ }
+ }
}
}
\ No newline at end of file