17 Commits

Author SHA1 Message Date
1cc76c5950 Update version to 0.4.0
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-03-07 16:00:21 -07:00
091a38f7ab Clear file input on image load
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-03-07 15:18:35 -07:00
5ec51d9ad9 Add hidden structures badge
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-03-07 14:52:48 -07:00
4d356df02a Add stop camera button
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-03-07 14:41:58 -07:00
4af06f0fe5 Update thorax model and improve model performance (#125)
Closes: #117

This bumps the thorax model from the YOLO nano to the YOLO small (640 x 640 input size) but greatly improves model performance by running a fake detection event on page load to get the model parameters into memory.

As a result of that change a new loading message was required, so the f7 preloader was switched out for an f7 progress bar and more messaging was added during the various stages. The progress bar also fixes the previous issue with the preloader.

Signed-off-by: Justin Georgi <justin.georgi@gmail.com>

Reviewed-on: #125
2024-03-07 13:06:14 -07:00
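
For reference, a minimal sketch of the warm-up trick this commit describes, assuming a TensorFlow.js graph model; the helper name and dummy input are illustrative, not the repository's exact code (the real version appears in detection-mixin.js below):

import * as tf from '@tensorflow/tfjs'

// Warm-up sketch: run one throwaway prediction at load time so the first
// real detection does not pay the cost of loading weights into memory.
async function warmUpModel(modelUrl) {  // hypothetical helper
  const model = await tf.loadGraphModel(modelUrl)
  const [height, width] = model.inputs[0].shape.slice(1, 3)  // e.g. 640 x 640
  const dummy = tf.ones([1, height, width, 3])               // fake image batch
  tf.dispose(model.predict(dummy))                           // discard the fake result
  tf.dispose(dummy)
  return model
}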
0e99679e00 Fix PWA build errors
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-03-06 16:41:18 -07:00
1f25a75cec Split detect.vue (#122)
Closes: #112

New mixins for camera and detection (remote and local), and new CSS for the detection page.

Reviewed-on: #122
2024-03-06 14:36:27 -07:00
b00b5cf492 Add performance tracking to local detection
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-03-05 15:51:28 -07:00
5086a52849 Return to standard model folder locations
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-03-05 15:06:01 -07:00
77240fe9bf Fix missing detection progress indicator on camera load
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-03-05 06:48:07 -07:00
7f9a7cb2d2 Cordova thorax model location update
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-03-04 17:18:15 -07:00
c452776689 Add better camera access for non-cordova deployments (#116)
This adds direct JavaScript access to the camera (with permission requests). Capturing an image with the camera now happens right on the detect page rather than in a separate window.

This also opens the possibility of live detection on the web app.

Reviewed-on: Georgi_Lab/ALVINN_f7#116
2024-03-04 17:03:28 -07:00
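
A hedged sketch of the direct camera access this commit describes (helper name illustrative; the resolution and rear-camera constraints mirror the camera mixin shown later in this diff):

// Request the camera, preferring the rear-facing lens; getUserMedia itself
// raises the browser permission prompt and resolves to a MediaStream.
async function startCamera(videoEl) {  // hypothetical helper
  const stream = await navigator.mediaDevices.getUserMedia({
    video: {
      width: { ideal: 1920 },
      height: { ideal: 1080 },
      facingMode: 'environment'
    },
    audio: false
  })
  videoEl.srcObject = stream  // show the live preview in a <video> element
  await videoEl.play()
  return stream
}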
239558194e Cordova package upgrade
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-03-04 16:58:59 -07:00
069ad74e30 Add full yolov8 compatible post-processing (#115)
This updates the local detection to YOLOv8-based models from the older YOLOv5-based models.

This includes significant additional post-processing of the raw YOLOv8 output, but it also means that the smaller nano YOLO model is available, as well as all of the updated ALVINN thorax training data.

Signed-off-by: Justin Georgi <justin.georgi@gmail.com>

Reviewed-on: Georgi_Lab/ALVINN_f7#115
2024-03-04 15:03:20 -07:00
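
For context, a YOLOv8 detection head emits a single tensor of shape [1, 4 + numClasses, numCandidates], with boxes in center-x/center-y/width/height form and no built-in NMS; a sketch of decoding one candidate row under that assumption (helper name hypothetical; the full implementation is in detection-mixin.js below):

// Decode one raw YOLOv8 candidate: the first four values are the box
// (xywh, center format), the rest are per-class scores.
function decodeCandidate(row, modelSize) {  // hypothetical helper
  const [cx, cy, w, h, ...classScores] = row
  const confidence = Math.max(...classScores)
  return {
    box: [
      (cx - w / 2) / modelSize,  // left, normalized 0..1
      (cy - h / 2) / modelSize,  // top
      (cx + w / 2) / modelSize,  // right
      (cy + h / 2) / modelSize   // bottom
    ],
    classId: classScores.indexOf(confidence),
    confidence
  }
}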
7f1aa63f07 Add site agreement expiration
Closes: #88

Reviewed-on: Georgi_Lab/ALVINN_f7#111
2024-02-23 22:00:05 -07:00
e644aa26f0 Add PWA to README
Signed-off-by: Justin Georgi <justin.georgi@gmail.com>
2024-02-22 20:28:45 -07:00
19a669dc98 Fix decoding error on android image load (#108)
Closes: #105

Signed-off-by: Justin Georgi <justin.georgi@gmail.com>

Reviewed-on: Georgi_Lab/ALVINN_f7#108
2024-02-22 17:29:44 -07:00
37 changed files with 581 additions and 362 deletions

@@ -5,7 +5,7 @@ Anatomy Lab Visual Identification Neural Net (A.L.V.I.N.N) is a f7 based app for
## Install
* **Android:** Download the latest Android apk in [packages](https://gitea.azgeorgis.net/Georgi_Lab/ALVINN_f7/packages) and open the downloaded file to install.
* **iOS:** To do
-* **Web app:** To do
+* **Web app:** Download the latest Web zip file in [packages](https://gitea.azgeorgis.net/Georgi_Lab/ALVINN_f7/packages) and extract the files to a folder available via web access, then visit that location in your web browser.
* **Run from source:** Clone this repository and in the root directory run `npm install` followed by `npm start`. For more information see [f7 info](f7_info.md).
## Quick Start

@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='utf-8'?>
-<widget id="edu.midwestern.alvinn" version="0.3.0" xmlns="http://www.w3.org/ns/widgets" xmlns:cdv="http://cordova.apache.org/ns/1.0" xmlns:android="http://schemas.android.com/apk/res/android">
+<widget id="edu.midwestern.alvinn" version="0.4.0" xmlns="http://www.w3.org/ns/widgets" xmlns:cdv="http://cordova.apache.org/ns/1.0" xmlns:android="http://schemas.android.com/apk/res/android">
<name>ALVINN</name>
<description>Anatomy Lab Visual Identification Neural Network.</description>
<author email="jgeorg@midwestern.edu" href="https://midwestern.edu">

@@ -1,12 +1,12 @@
{
"name": "edu.midwestern.alvinn",
"version": "0.1.0-b",
"version": "0.3.0",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "edu.midwestern.alvinn",
"version": "0.1.0-b",
"version": "0.3.0",
"license": "Apache-2.0",
"devDependencies": {
"cordova-android": "^12.0.1",

@@ -1,7 +1,7 @@
{
"name": "edu.midwestern.alvinn",
"displayName": "ALVINN",
"version": "0.3.0",
"version": "0.4.0",
"description": "Anatomy Lab Visual Identification Neural Network.",
"main": "index.js",
"scripts": {

package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
{
"name": "alvinn",
"version": "0.2.1",
"version": "0.3.0",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "alvinn",
"version": "0.2.1",
"version": "0.3.0",
"hasInstallScript": true,
"license": "UNLICENSED",
"dependencies": {

@@ -1,7 +1,7 @@
{
"name": "alvinn",
"private": true,
"version": "0.3.0",
"version": "0.4.0",
"description": "ALVINN",
"repository": "",
"license": "UNLICENSED",

@@ -11,7 +11,7 @@
<f7-list-item link="/contact/" view=".view-main" panel-close=".panel-left">Contact</f7-list-item>
</f7-list>
<f7-toolbar class="panel-bar" position="bottom">
-<span>version 0.3.0</span>
+<span>version 0.4.0</span>
</f7-toolbar>
</f7-page>
</f7-view>
@@ -65,6 +65,7 @@
return {
rememberAgreement: false,
siteAgreement: false,
+dateAgreement: null,
showDisclaimer: true
}
},
@@ -74,8 +75,11 @@
var loadedSettings = JSON.parse(loadSiteSettings)
this.siteAgreement = loadedSettings.siteAgreement
this.rememberAgreement = loadedSettings.rememberAgreement
+this.dateAgreement = loadedSettings.dateAgreement && new Date(loadedSettings.dateAgreement)
}
-if (this.siteAgreement && this.rememberAgreement) {
+var curDate = new Date ()
+var agreeStillValid = this.dateAgreement && (curDate < this.dateAgreement.setMonth(this.dateAgreement.getMonth() + 3))
+if (this.siteAgreement && this.rememberAgreement && agreeStillValid) {
this.showDisclaimer = false
store().agree()
}
@@ -91,7 +95,8 @@
store().agree()
let newSettings = {
siteAgreement: this.siteAgreement,
-rememberAgreement: this.rememberAgreement
+rememberAgreement: this.rememberAgreement,
+dateAgreement: new Date()
}
let saveSiteSettings = new Promise(
(saved,failed) => {
@@ -141,7 +146,7 @@
// Register service worker (only on production build)
serviceWorker: process.env.NODE_ENV ==='production' ? {
-path: '/service-worker.js',
+path: './service-worker.js',
} : {},
// Input settings
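
For reference, a standalone sketch of the three-month validity window the dateAgreement hunks above introduce (hypothetical helper; it copies the stored date first because Date.prototype.setMonth mutates its receiver):

// Agreement is valid for three months from the stored dateAgreement.
function agreementStillValid(dateAgreement, months = 3) {  // hypothetical helper
  if (!dateAgreement) return false
  const expiry = new Date(dateAgreement)       // copy: setMonth mutates in place
  expiry.setMonth(expiry.getMonth() + months)
  return new Date() < expiry
}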

src/css/detect.css (new file, 188 lines added)
@@ -0,0 +1,188 @@
/*Styles for the structure detection page*/
/*Basic style*/
.detect-grid {
display: grid;
grid-template-columns: 1fr;
grid-template-rows: 1fr minmax(calc(var(--f7-chip-height) + 33px), auto) auto min-content;
grid-template-areas:
"image-view"
"result-view"
"detect-settings"
"menu-view";
justify-items: center;
height: calc(100% - var(--f7-navbar-height) - var(--f7-safe-area-top) - var(--f7-safe-area-bottom));
max-height: calc(100% - var(--f7-navbar-height) - var(--f7-safe-area-top) - var(--f7-safe-area-bottom));
}
.image-container {
grid-area: image-view;
width: 100%;
height: 100%;
min-width: 0;
min-height: 0;
position: relative;
display: flex;
align-self: stretch;
}
.popover-button-menu {
max-width: 90vw;
max-height: 90vh;
width: auto;
}
.segment-button-menu {
flex-wrap: nowrap;
flex-direction: column;
max-height: 100%;
min-height: 0px;
}
.chip-media {
background-color: var(--chip-media-background) !important;
}
.chip-results {
display: flex;
flex-wrap: wrap;
gap: 5px;
padding: 10px;
--f7-chip-border-radius: 16px;
--f7-chip-media-size: 32px;
--f7-chip-font-weight: normal;
overflow-y: auto;
overflow-x: hidden;
max-height: 100%;
}
.chip-results .chip {
padding-left: 8px;
}
.progress-hide {
display: none;
}
.selected-chip {
font-weight: 500;
box-shadow: 4px 4px 1px var(--avn-theme-color);
transform: translate(-2px, -2px);
}
.detect-inputs {
display: flex;
align-items: center;
margin: 5px;
width: 100%;
max-width: 400px;
min-width: 192px;
}
.level-slide-vert {
display: none;
}
.image-menu {
grid-area: menu-view;
margin: 5px;
max-width: 400px;
min-width: 192px;
}
.image-menu .button {
aspect-ratio: 1;
height: auto;
padding: 5px;
flex: 1 1 0%;
}
.image-menu > .button > svg {
aspect-ratio: 1;
height: auto;
width: 100%;
}
.segment-button-menu .button {
padding: 8px;
aspect-ratio: 1;
width: auto;
flex: 1 1 0%;
max-height: 100px;
max-width: 100px;
}
/*Additional styles for small format landscape orientation*/
@media (max-height: 450px) and (orientation: landscape) {
.detect-grid {
grid-template-columns: minmax(0,1fr) minmax(56px,max-content) auto auto;
grid-template-rows: calc(100vh - var(--f7-navbar-height) - var(--f7-safe-area-top) - var(--f7-safe-area-bottom) - 64px);
grid-template-areas:
"image-view result-view detect-settings menu-view";
justify-items: stretch;
align-items: stretch;
height: calc(100% - var(--f7-navbar-height) - var(--f7-safe-area-top) - var(--f7-safe-area-bottom));
max-height: calc(100% - var(--f7-navbar-height) - var(--f7-safe-area-top) - var(--f7-safe-area-bottom));
position: relative;
}
.chip-results {
flex-direction: column;
max-height: 100%;
justify-self: start;
flex-wrap: nowrap;
}
.detect-inputs {
flex-direction: column;
min-width: 0;
max-width: 72px;
}
.level-slide-horz {
display: none;
}
.level-slide-vert {
display: block;
}
.image-container {
flex-direction: column;
}
.image-menu {
flex-direction: column;
aspect-ratio: .25;
width: auto;
min-width: 0;
height: 100%;
}
.image-menu .button {
aspect-ratio: 1;
width: auto;
height: 100%;
flex: 1 1 0%;
border-bottom: 1px solid var(--f7-segmented-raised-divider-color);
border-bottom-left-radius: 0px !important;
}
.segment-button-menu {
flex-direction: row;
max-height: 100%;
min-height: 0px;
}
.segment-button-menu .button {
height: auto;
flex: 1 1 0%;
max-height: 100px;
max-width: 100px;
}
.button > svg {
width: 100%;
height: auto;
}
}

@@ -10,7 +10,7 @@
* Disables use of inline scripts in order to mitigate risk of XSS vulnerabilities. To change this:
* Enable inline JS: add 'unsafe-inline' to default-src
-->
-<meta http-equiv="Content-Security-Policy" content="worker-src blob:; child-src blob: gap:; img-src 'self' blob: data:; default-src * 'self' 'unsafe-inline' 'unsafe-eval' data: content:">
+<meta http-equiv="Content-Security-Policy" content="worker-src 'self' blob:; child-src blob: gap:; img-src 'self' blob: data:; default-src * 'self' 'unsafe-inline' 'unsafe-eval' data: content:">
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, minimum-scale=1, user-scalable=no, viewport-fit=cover">
<meta name="theme-color" content="#fff">

@@ -9,32 +9,32 @@
"theme_color": "#002f65",
"icons": [
{
"src": "/icons/128x128.png",
"src": "../icons/128x128.png",
"sizes": "128x128",
"type": "image/png"
},
{
"src": "/icons/144x144.png",
"src": "../icons/144x144.png",
"sizes": "144x144",
"type": "image/png"
},
{
"src": "/icons/152x152.png",
"src": "../icons/152x152.png",
"sizes": "152x152",
"type": "image/png"
},
{
"src": "/icons/192x192.png",
"src": "../icons/192x192.png",
"sizes": "192x192",
"type": "image/png"
},
{
"src": "/icons/256x256.png",
"src": "../icons/256x256.png",
"sizes": "256x256",
"type": "image/png"
},
{
"src": "/icons/512x512.png",
"src": "../icons/512x512.png",
"sizes": "512x512",
"type": "image/png"
}

@@ -0,0 +1,43 @@
[
"Abdominal diaphragm",
"Aorta",
"Azygous vein",
"Brachiocephalic trunk",
"Caudal vena cava",
"Cranial vena cava",
"Esophagus",
"External abdominal oblique",
"Iliocostalis",
"Latissimus dorsi",
"Left atrium",
"Left auricle",
"Left lung",
"Left subclavian artery",
"Left ventricle",
"Longissimus",
"Pectoralis profundus",
"Pectoralis superficialis",
"Pericardium",
"Phrenic nerve",
"Primary bronchus",
"Pulmonary artery",
"Pulmonary trunk",
"Pulmonary vein",
"Rectus abdominis",
"Rectus thoracis",
"Recurrent laryngeal nerve",
"Rhomboideus",
"Right atrium",
"Right auricle",
"Right lung",
"Right ventricle",
"Scalenus",
"Serratus dorsalis caudalis",
"Serratus dorsalis cranialis",
"Serratus ventralis",
"Spinalis",
"Sympathetic chain",
"Trachea",
"Trapezius",
"Vagus nerve"
]

11 binary files not shown.

@@ -0,0 +1,53 @@
description: Ultralytics best model trained on /data/ALVINN/Thorax/Thorax 0.1.0/thorax.yaml
author: Ultralytics
license: AGPL-3.0 https://ultralytics.com/license
date: '2024-03-07T16:03:03.296997'
version: 8.1.20
stride: 32
task: detect
batch: 1
imgsz:
- 640
- 640
names:
0: Abdominal diaphragm
1: Aorta
2: Azygous vein
3: Brachiocephalic trunk
4: Caudal vena cava
5: Cranial vena cava
6: Esophagus
7: External abdominal oblique
8: Iliocostalis
9: Latissimus dorsi
10: Left atrium
11: Left auricle
12: Left lung
13: Left subclavian artery
14: Left ventricle
15: Longissimus
16: Pectoralis profundus
17: Pectoralis superficialis
18: Pericardium
19: Phrenic nerve
20: Primary bronchus
21: Pulmonary artery
22: Pulmonary trunk
23: Pulmonary vein
24: Rectus abdominis
25: Rectus thoracis
26: Recurrent laryngeal nerve
27: Rhomboideus
28: Right atrium
29: Right auricle
30: Right lung
31: Right ventricle
32: Scalenus
33: Serratus dorsalis caudalis
34: Serratus dorsalis cranialis
35: Serratus ventralis
36: Spinalis
37: Sympathetic chain
38: Trachea
39: Trapezius
40: Vagus nerve

File diff suppressed because one or more lines are too long

@@ -1,12 +0,0 @@
[
"Right lung",
"Diaphragm",
"Heart",
"Caudal vena cava",
"Cranial vena cava",
"Phrenic nerve",
"Trachea",
"Vagus nerve",
"Left Lung",
"Aorta"
]

File diff suppressed because one or more lines are too long

src/pages/camera-mixin.js (new file, 45 lines added)
@@ -0,0 +1,45 @@
export default {
methods: {
async openCamera() {
var cameraLoaded = false
const devicesList = await navigator.mediaDevices.enumerateDevices()
this.videoDeviceAvailable = devicesList.some( d => d.kind == "videoinput")
if (this.videoDeviceAvailable) {
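// Bare getUserMedia call: presumably here to trigger the browser permission prompt up front; its returned promise is discarded.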
navigator.mediaDevices.getUserMedia({video: true})
var vidConstraint = {
video: {
width: {
ideal: 1920
},
height: {
ideal: 1080
},
facingMode: 'environment'
},
audio: false
}
const stream = await navigator.mediaDevices.getUserMedia(vidConstraint);
cameraLoaded = true
this.cameraStream = stream
}
return cameraLoaded
},
closeCamera () {
this.cameraStream.getTracks().forEach( t => t.stop())
this.videoAvailable = false
},
captureVidFrame() {
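// Capture the current video frame: pause playback, draw the frame onto an offscreen canvas, then hand the data URL to the normal getImage() pipeline.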
const vidViewer = this.$refs.vid_viewer
vidViewer.pause()
let tempCVS = document.createElement('canvas')
tempCVS.height = vidViewer.videoHeight || parseInt(vidViewer.style.height)
tempCVS.width = vidViewer.videoWidth || parseInt(vidViewer.style.width)
const tempCtx = tempCVS.getContext('2d')
tempCtx.drawImage(vidViewer, 0, 0)
this.getImage(tempCVS.toDataURL())
},
async videoStream() {
//TODO
}
}
}

@@ -7,9 +7,13 @@
<f7-block class="detect-grid">
<div class="image-container">
<canvas id="im-draw" ref="image_cvs" @click="structureClick" :style="`display: ${imageLoaded ? 'block' : 'none'}; flex: 1 1 0%; max-width: 100%; max-height: 100%; min-width: 0; min-height: 0; background-size: contain; background-position: center; background-repeat: no-repeat`" />
<SvgIcon v-if="!imageView" :icon="f7route.params.region" fill-color="var(--avn-theme-color)" @click="selectImage" />
<SvgIcon v-if="!imageView && !videoAvailable" :icon="f7route.params.region" fill-color="var(--avn-theme-color)" @click="selectImage" />
<div class="vid-container" v-if="videoAvailable" style="width: 100%; height: 100%">
<video id="vid-view" ref="vid_viewer" :srcObject="cameraStream" :autoPlay="true" style="width: 100%; height: 100%"></video>
<f7-button @click="captureVidFrame()" style="position: absolute; bottom: 32px; left: 50%; transform: translateX(-50%);" fill large>Capture</f7-button>
</div>
</div>
<div v-if="(resultData && resultData.detections) || detecting" class="chip-results" style="grid-area: result-view; flex: 0 0 auto; align-self: center;">
<div class="chip-results" style="grid-area: result-view; flex: 0 0 auto; align-self: center;">
<f7-chip v-for="result in showResults.filter( r => { return r.aboveThreshold && r.isSearched && !r.isDeleted })"
:class="(result.resultIndex == selectedChip) ? 'selected-chip' : ''"
:text="result.label"
@@ -20,8 +24,8 @@
@delete="deleteChip(result.resultIndex)"
:style="chipGradient(result.confidence)"
/>
<span v-if="numResults == 0 && !detecting" style="height: var(--f7-chip-height); font-size: calc(var(--f7-chip-height) - 4px); font-weight: bolder; margin: 2px;">No results.</span>
<f7-preloader v-if="detecting || modelLoading" size="32" style="color: var(--avn-theme-color);" />
<div v-if="!numResults" style="height: var(--f7-chip-height); width: 100%; text-align: center; font-size: calc(var(--f7-chip-height) - 4px); font-weight: bolder; margin: 2px;">{{ message }}</div>
<f7-progressbar v-if="(detecting || modelLoading)" style="width: 100%;" :infinite="true" />
</div>
<div v-if="showDetectSettings" class="detect-inputs" style="grid-area: detect-settings;">
<f7-range class="level-slide-horz" :min="0" :max="100" :step="1" @range:change="onLevelChange" v-model:value="detectorLevel" type="range" style="flex: 1 1 100%"/>
@@ -37,17 +41,21 @@
<f7-button popover-open="#region-popover">
<RegionIcon :region="activeRegion" />
</f7-button>
<f7-button popover-open="#capture-popover">
<f7-button v-if="!videoAvailable" :class="(!modelLoading) ? '' : 'disabled'" popover-open="#capture-popover">
<SvgIcon icon="camera_add"/>
</f7-button>
<f7-button v-if="videoAvailable" @click="closeCamera()">
<SvgIcon icon="no_photography"/>
</f7-button>
<f7-button @click="() => showDetectSettings = !showDetectSettings" :class="(imageLoaded) ? '' : 'disabled'">
<SvgIcon icon="visibility"/>
+<f7-badge v-if="numResults && (showResults.length != numResults)" color="red" style="position: absolute; right: 15%; top: 15%;">{{ showResults.length - numResults }}</f7-badge>
</f7-button>
<f7-button :class="(numResults && uploadDirty && viewedAll) ? '' : 'disabled'" @click="submitData">
<SvgIcon :icon="(uploadUid) ? 'cloud_done' : 'cloud_upload'"/>
</f7-button>
</f7-segmented>
<input type="file" ref="image_chooser" @change="getImage()" accept="image/*" capture="environment" style="display: none;"/>
<input type="file" ref="image_chooser" @change="getImage()" accept="image/*" style="display: none;"/>
</f7-block>
<f7-panel :id="detectorName + '-settings'" right cover :backdrop="false" :container-el="`#${detectorName}-detect-page`">
@@ -86,7 +94,7 @@
<f7-button style="height: auto; width: auto;" popover-close="#capture-popover" @click="selectImage('file')">
<SvgIcon icon="photo_library" />
</f7-button>
<f7-button style="height: auto; width: auto;" popover-close="#capture-popover" class="disabled" @click="videoStream">
<f7-button style="height: auto; width: auto;" popover-close="#capture-popover" @click="videoStream">
<SvgIcon icon="videocam"/>
</f7-button>
</f7-segmented>
@@ -95,189 +103,7 @@
</f7-page>
</template>
<style>
.detect-grid {
display: grid;
grid-template-columns: 1fr;
grid-template-rows: 1fr 56px auto min-content;
grid-template-areas:
"image-view"
"result-view"
"detect-settings"
"menu-view";
justify-items: center;
height: calc(100% - var(--f7-navbar-height) - var(--f7-safe-area-top) - var(--f7-safe-area-bottom));
max-height: calc(100% - var(--f7-navbar-height) - var(--f7-safe-area-top) - var(--f7-safe-area-bottom));
}
.image-container {
grid-area: image-view;
width: 100%;
height: 100%;
min-width: 0;
min-height: 0;
position: relative;
display: flex;
align-self: stretch;
}
.popover-button-menu {
max-width: 90vw;
max-height: 90vh;
width: auto;
}
.segment-button-menu {
flex-wrap: nowrap;
flex-direction: column;
max-height: 100%;
min-height: 0px;
}
.chip-media {
background-color: var(--chip-media-background) !important;
}
.chip-results {
display: flex;
flex-wrap: wrap;
gap: 5px;
padding: 10px;
--f7-chip-border-radius: 16px;
--f7-chip-media-size: 32px;
--f7-chip-font-weight: normal;
overflow-y: auto;
max-height: 100%;
}
.chip-results .chip {
padding-left: 8px;
}
.selected-chip {
font-weight: 500;
box-shadow: 4px 4px 1px var(--avn-theme-color);
transform: translate(-2px, -2px);
}
.detect-inputs {
display: flex;
align-items: center;
margin: 5px;
width: 100%;
max-width: 400px;
min-width: 192px;
}
.level-slide-vert {
display: none;
}
.image-menu {
grid-area: menu-view;
margin: 5px;
/*width: 90%;*/
max-width: 400px;
min-width: 192px;
}
.image-menu .button {
aspect-ratio: 1;
height: auto;
padding: 5px;
flex: 1 1 0%;
}
.image-menu > .button > svg {
aspect-ratio: 1;
height: auto;
width: 100%;
}
.segment-button-menu .button {
padding: 8px;
aspect-ratio: 1;
width: auto;
flex: 1 1 0%;
max-height: 100px;
max-width: 100px;
}
@media (max-height: 450px) and (orientation: landscape) {
.detect-grid {
grid-template-columns: minmax(0,1fr) minmax(56px,max-content) auto auto;
grid-template-rows: calc(100vh - var(--f7-navbar-height) - var(--f7-safe-area-top) - var(--f7-safe-area-bottom) - 64px);
grid-template-areas:
"image-view result-view detect-settings menu-view";
justify-items: stretch;
align-items: stretch;
height: calc(100% - var(--f7-navbar-height) - var(--f7-safe-area-top) - var(--f7-safe-area-bottom));
max-height: calc(100% - var(--f7-navbar-height) - var(--f7-safe-area-top) - var(--f7-safe-area-bottom));
position: relative;
}
.chip-results {
flex-direction: column;
max-height: 100%;
justify-self: start;
flex-wrap: nowrap;
}
.detect-inputs {
flex-direction: column;
min-width: 0;
max-width: 72px;
}
.level-slide-horz {
display: none;
}
.level-slide-vert {
display: block;
}
.image-container {
flex-direction: column;
}
.image-menu {
flex-direction: column;
aspect-ratio: .25;
width: auto;
min-width: 0;
height: 100%;
}
.image-menu .button {
aspect-ratio: 1;
width: auto;
height: 100%;
flex: 1 1 0%;
border-bottom: 1px solid var(--f7-segmented-raised-divider-color);
border-bottom-left-radius: 0px !important;
}
.segment-button-menu {
flex-direction: row;
max-height: 100%;
min-height: 0px;
}
.segment-button-menu .button {
height: auto;
flex: 1 1 0%;
max-height: 100px;
max-width: 100px;
}
.button > svg {
width: 100%;
height: auto;
}
}
</style>
<style src="../css/detect.css" />
<script>
import { f7 } from 'framework7-vue'
@@ -287,12 +113,13 @@
import SvgIcon from '../components/svg-icon.vue'
import submitMixin from './submit-mixin'
-import detectMixin from './local-detect'
+import detectionMixin from './detection-mixin'
+import cameraMixin from './camera-mixin'
-import thoraxClasses from '../models/thorax_tfwm/classes.json'
+import thoraxClasses from '../models/thorax/classes.json'
export default {
-mixins: [submitMixin, detectMixin],
+mixins: [submitMixin, detectionMixin, cameraMixin],
props: {
f7route: Object,
},
@@ -321,7 +148,10 @@
uploadUid: null,
uploadDirty: false,
modelLocation: '',
-modelLoading: false
+modelLoading: true,
+videoDeviceAvailable: false,
+videoAvailable: false,
+cameraStream: null
}
},
setup() {
@@ -334,10 +164,10 @@
this.detectorName = 'thorax'
this.classesList = thoraxClasses
/* VITE setting */
-this.modelLocation = '../models/thorax_tfwm/model.json'
+this.modelLocation = '../models/thorax/model.json'
/* PWA Build setting */
-//this.modelLocation = './models/thorax_tfwm/model.json'
-this.modelLocationCordova = 'https://localhost/models/thorax_tfwm/model.json'
+//this.modelLocation = './models/thorax/model.json'
+this.modelLocationCordova = 'https://localhost/models/thorax/model.json'
break;
case 'abdomen':
this.activeRegion = 1
@@ -353,43 +183,36 @@
}
var loadServerSettings = localStorage.getItem('serverSettings')
if (loadServerSettings) this.serverSettings = JSON.parse(loadServerSettings)
-var self = this
},
mounted () {
if (this.serverSettings && this.serverSettings.use) {
-var modelURL = `http://${this.serverSettings.address}:${this.serverSettings.port}/detectors`
-var xhr = new XMLHttpRequest()
-xhr.open("GET", modelURL)
-xhr.setRequestHeader('Content-Type', 'application/json')
-xhr.timeout = 10000
-xhr.ontimeout = this.remoteTimeout
-xhr.onload = function () {
-if (this.status !== 200) {
-console.log(xhr.response)
-const errorResponse = JSON.parse(xhr.response)
-f7.dialog.alert(`ALVINN has encountered an error: ${errorResponse.error}`)
-return;
-}
-var detectors = JSON.parse(xhr.response).detectors
-var findLabel = detectors
-.find( d => { return d.name == self.detectorName } )?.labels
-.filter( l => { return l != "" } ).sort()
-.map( l => { return {'name': l, 'detect': true} } )
-self.detectorLabels = findLabel || []
-}
-xhr.send()
+this.getRemoteLabels()
+this.modelLoading = false
} else {
-self.modelLoading = true
-self.detectorLabels = self.classesList.map( l => { return {'name': l, 'detect': true} } )
-self.loadModel(self.isCordova ? self.modelLocationCordova : self.modelLocation).then(() => {
-self.modelLoading = false
+this.modelLoading = true
+this.detectorLabels = this.classesList.map( l => { return {'name': l, 'detect': true} } )
+this.loadModel(this.isCordova ? this.modelLocationCordova : this.modelLocation).then(() => {
+this.modelLoading = false
}).catch((e) => {
console.log(e.message)
f7.dialog.alert(`ALVINN AI model error: ${e.message}`)
this.modelLoading = false
})
}
+window.onresize = (e) => { this.selectChip('redraw') }
},
computed: {
+message () {
+if (this.modelLoading) {
+return "Preparing ALVINN..."
+} else if (this.detecting) {
+return "Finding structures..."
+} else if (this.numResults == 0 && this.imageLoaded) {
+return "No results."
+} else {
+return "ALVINN is ready."
+}
+},
showResults () {
var filteredResults = this.resultData.detections
if (!filteredResults) return []
@@ -427,50 +250,21 @@
return `--chip-media-background: hsl(${confVal / 100 * 120}deg 100% 50%)`
},
setData () {
-var self = this
if (this.serverSettings && this.serverSettings.use) {
-var modelURL = `http://${this.serverSettings.address}:${this.serverSettings.port}/detect`
-var xhr = new XMLHttpRequest()
-xhr.open("POST", modelURL)
-xhr.timeout = 10000
-xhr.ontimeout = this.remoteTimeout
-xhr.setRequestHeader('Content-Type', 'application/json')
-xhr.onload = function () {
-self.detecting = false
-if (this.status !== 200) {
-console.log(xhr.response)
-const errorResponse = JSON.parse(xhr.response)
-f7.dialog.alert(`ALVINN has encountered an error: ${errorResponse.error}`)
-return;
-}
-self.resultData = JSON.parse(xhr.response)
-self.uploadDirty = true
-}
-var doodsData = {
-"detector_name": this.detectorName,
-"detect": {
-"*": 1
-},
-"data": this.imageView.src.split(',')[1]
-}
-xhr.send(JSON.stringify(doodsData))
+this.remoteDetect()
} else {
this.localDetect(this.imageView).then(dets => {
-self.detecting = false
-self.resultData = dets
-self.uploadDirty = true
+this.detecting = false
+this.resultData = dets
+this.uploadDirty = true
}).catch((e) => {
-console.log(e.message)
-f7.dialog.alert(`ALVINN structure finding error: ${e.message}`)
-})
+console.log(e.message)
+this.detecting = false
+this.resultData = {}
+f7.dialog.alert(`ALVINN structure finding error: ${e.message}`)
+})
}
},
-remoteTimeout () {
-this.detecting = false
-f7.dialog.alert('No connection to remote ALVINN instance. Please check app settings.')
-},
selectAll (ev) {
if (ev.target.checked) {
this.detectorLabels.forEach( s => s.detect = true )
@@ -478,18 +272,17 @@
this.detectorLabels.forEach( s => s.detect = false )
}
},
-selectImage (mode) {
+async selectImage (mode) {
this.imageLoadMode = mode
if (mode == "camera") {
this.$refs.image_chooser.setAttribute("capture","environment")
} else {
this.$refs.image_chooser.removeAttribute("capture")
}
if (this.isCordova && mode == "camera") {
-navigator.camera.getPicture(this.getImage, this.onFail, { quality: 50, destinationType: Camera.DestinationType.DATA_URL, correctOrientation: true });
-} else {
-var loadResult = this.$refs.image_chooser.click()
+navigator.camera.getPicture(this.getImage, this.onFail, { quality: 50, destinationType: Camera.DestinationType.DATA_URL, correctOrientation: true });
+return
}
+if (mode == "camera") {
+this.videoAvailable = await this.openCamera()
+if (this.videoAvailable) { return }
+}
+this.$refs.image_chooser.click()
},
onFail (message) {
alert(`Camera fail: ${message}`)
@@ -536,13 +329,19 @@
return [imCanvas, imageCtx]
},
getImage (searchImage) {
-let loadImage =new Promise(resolve => {
-if (this.isCordova && this.imageLoadMode == "camera") {
+let loadImage = new Promise(resolve => {
+if (this.videoAvailable) {
+this.closeCamera()
+this.detecting = true
+resolve(searchImage)
+} else if (this.isCordova && this.imageLoadMode == "camera") {
this.detecting = true
resolve('data:image/jpg;base64,' + searchImage)
} else {
const searchImage = this.$refs.image_chooser.files[0]
+this.$refs.image_chooser.value=[]
var reader = new FileReader()
reader.addEventListener("loadend", () => {
reader.addEventListener("load", () => {
this.detecting = true
resolve(reader.result)
})
@@ -552,6 +351,7 @@
loadImage.then((imgData) => {
this.imageLoaded = true
this.resultData = {}
+this.selectedChip = -1
this.imageView = new Image()
this.imageView.src = imgData
return(this.imageView.decode())
@@ -559,10 +359,8 @@
const [imCanvas, _] = this.resetView()
imCanvas.style['background-image'] = `url(${this.imageView.src})`
/******
-* setTimeout is not a good solution,
-* but it's the only way I can find to
-* not cut off drawing of of the progress
-* spinner
+* setTimeout is not a good solution, but it's the only way
+* I can find to not cut off drawing of the progress spinner
******/
setTimeout(() => {
this.setData()
@@ -572,10 +370,6 @@
f7.dialog.alert(`Error loading image: ${e.message}`)
})
},
-videoStream() {
-//TODO
-return null
-},
async submitData () {
var uploadData = this.showResults
.filter( d => { return d.aboveThreshold && d.isSearched && !d.isDeleted })

@@ -0,0 +1,151 @@
import * as tf from '@tensorflow/tfjs'
import { f7 } from 'framework7-vue'
import { nextTick } from 'vue'
var model = null
export default {
methods: {
async loadModel(weights) {
await nextTick()
model = await tf.loadGraphModel(weights)
const [modelWidth, modelHeight] = model.inputs[0].shape.slice(1, 3)
const dummyT = tf.ones([1,modelWidth,modelHeight,3])
model.predict(dummyT) //Run model once to preload weights for better response time
return model
},
async localDetect(imageData) {
console.time('pre-process')
const [modelWidth, modelHeight] = model.inputs[0].shape.slice(1, 3)
const input = tf.tidy(() => {
return tf.image.resizeBilinear(tf.browser.fromPixels(imageData), [modelWidth, modelHeight]).div(255.0).expandDims(0)
})
console.timeEnd('pre-process')
console.time('run prediction')
const res = model.predict(input)
console.timeEnd('run prediction')
console.time('post-process')
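// YOLOv8 output is [1, outputSize, detectAttempts], where outputSize = 4 box values + one score per class; transpose so each candidate's values are contiguous.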
const detectAttempts = res.shape[2]
const outputSize = res.shape[1]
const rawRes = tf.transpose(res,[0,2,1]).dataSync()
let rawBoxes = []
let rawScores = []
for (var i = 0; i < detectAttempts; i++) {
var getBox = rawRes.slice((i * outputSize),(i * outputSize) + 4)
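// Convert the xywh center-format box to normalized [left, top, right, bottom].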
var boxCalc = [
(getBox[0] - (getBox[2] / 2)) / modelWidth,
(getBox[1] - (getBox[3] / 2)) / modelHeight,
(getBox[0] + (getBox[2] / 2)) / modelWidth,
(getBox[1] + (getBox[3] / 2)) / modelHeight,
]
rawBoxes.push(boxCalc)
rawScores.push(rawRes.slice((i * outputSize) + 4,(i + 1) * outputSize))
}
const tBoxes = tf.tensor2d(rawBoxes)
let tScores = null
let boxes_data = []
let scores_data = []
let classes_data = []
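// Per-class non-max suppression: keep at most 10 boxes per class with IoU below 0.5 and confidence above 0.05.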
for (var c = 0; c < outputSize - 4; c++) {
tScores = rawScores.map(x => x[c])
var validBoxes = await tf.image.nonMaxSuppressionAsync(tBoxes,tf.tensor1d(tScores),10,0.5,.05)
validBoxes = validBoxes.dataSync()
if (validBoxes) {
boxes_data.push(...rawBoxes.filter( (_, idx) => validBoxes.includes(idx)))
var outputScores = tScores.filter( (_, idx) => validBoxes.includes(idx))
scores_data.push(...outputScores)
classes_data.push(...outputScores.fill(c))
}
}
const valid_detections_data = classes_data.length
var output = {
detections: []
}
for (var i =0; i < valid_detections_data; i++) {
var [dLeft, dTop, dRight, dBottom] = boxes_data[i]
output.detections.push({
"top": dTop,
"left": dLeft,
"bottom": dBottom,
"right": dRight,
"label": this.detectorLabels[classes_data[i]].name,
"confidence": scores_data[i] * 100
})
}
tf.dispose(res)
tf.dispose(tBoxes)
console.timeEnd('post-process')
return output
},
getRemoteLabels() {
var self = this
var modelURL = `http://${this.serverSettings.address}:${this.serverSettings.port}/detectors`
var xhr = new XMLHttpRequest()
xhr.open("GET", modelURL)
xhr.setRequestHeader('Content-Type', 'application/json')
xhr.timeout = 10000
xhr.ontimeout = this.remoteTimeout
xhr.onload = function () {
if (this.status !== 200) {
console.log(xhr.response)
const errorResponse = JSON.parse(xhr.response)
f7.dialog.alert(`ALVINN has encountered an error: ${errorResponse.error}`)
return
}
var detectors = JSON.parse(xhr.response).detectors
var findLabel = detectors
.find( d => { return d.name == self.detectorName } )?.labels
.filter( l => { return l != "" } ).sort()
.map( l => { return {'name': l, 'detect': true} } )
self.detectorLabels = findLabel || []
}
xhr.onerror = function (e) {
f7.dialog.alert('ALVINN has encountered an unknown server error')
return
}
xhr.send()
},
remoteDetect() {
var self = this
var modelURL = `http://${this.serverSettings.address}:${this.serverSettings.port}/detect`
var xhr = new XMLHttpRequest()
xhr.open("POST", modelURL)
xhr.timeout = 10000
xhr.ontimeout = this.remoteTimeout
xhr.setRequestHeader('Content-Type', 'application/json')
xhr.onload = function () {
self.detecting = false
if (this.status !== 200) {
console.log(xhr.response)
const errorResponse = JSON.parse(xhr.response)
f7.dialog.alert(`ALVINN has encountered an error: ${errorResponse.error}`)
return;
}
self.resultData = JSON.parse(xhr.response)
self.uploadDirty = true
}
var doodsData = {
"detector_name": this.detectorName,
"detect": {
"*": 1
},
"data": this.imageView.src.split(',')[1]
}
xhr.send(JSON.stringify(doodsData))
},
remoteTimeout () {
this.detecting = false
f7.dialog.alert('No connection to remote ALVINN instance. Please check app settings.')
}
}
}

@@ -1,48 +0,0 @@
import * as tf from '@tensorflow/tfjs'
var model = null
export default {
methods: {
async loadModel(weights) {
model = await tf.loadGraphModel(weights).then(graphModel => {
return graphModel
})
},
async localDetect(imageData) {
const [modelWidth, modelHeight] = model.inputs[0].shape.slice(1, 3);
const input = tf.tidy(() => {
return tf.image.resizeBilinear(tf.browser.fromPixels(imageData), [modelWidth, modelHeight]).div(255.0).expandDims(0)
})
var results = model.executeAsync(input).then(res => {
const [boxes, scores, classes, valid_detections] = res;
const boxes_data = boxes.dataSync();
const scores_data = scores.dataSync();
const classes_data = classes.dataSync();
const valid_detections_data = valid_detections.dataSync()[0];
tf.dispose(res)
var output = {
detections: []
}
for (var i =0; i < valid_detections_data; i++) {
var [dLeft, dTop, dRight, dBottom] = boxes_data.slice(i * 4, (i + 1) * 4);
output.detections.push({
"top": dTop,
"left": dLeft,
"bottom": dBottom,
"right": dRight,
"label": this.detectorLabels[classes_data[i]].name,
"confidence": scores_data[i] * 100
})
}
return output
})
return results
}
}
}