问题描述
我想了解如何停止张量流处理。 我想创建一个停止视频和处理的功能。
基本上，练习结束后，我希望它停止，包括视频捕获和 TensorFlow 处理。我还想实现一个关闭按钮。但我始终无法弄清楚如何结束视频捕获和 TensorFlow 处理过程。
我到处搜索过,但什么也没找到,我不知道该如何停止。 这里的代码
谢谢
/**
* @license
* copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License,Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import * as posenet from '@tensorflow-models/posenet';
import dat from 'dat.gui';
import Stats from 'stats.js';
import {drawBoundingBox,drawKeypoints,drawSkeleton,isMobile,toggleLoadingUI,global_zero} from './demo_util';
// Canvas / video dimensions used throughout the demo.
const videoWidth = 600;
const videoHeight = 500;

// Mutable workout state shared between the GUI callbacks ('Start Workout'
// button in setupGui) and the render loop in detectPoseInRealTime.
let rep_count = 0;    // raw movement count; display divides by 2 (two crossings per rep — TODO confirm)
let wko_started = 0;  // 1 while a workout is in progress
let done = 0;         // 1 once the workout goal (reps or duration) is reached
let timer = 0;        // 1 once the post-countdown workout clock is running
let t0 = 0;           // reference timestamp, seconds since epoch
let count_down = 0;   // 1 while the 5-second start countdown is showing
let act_dur = 0;      // elapsed workout duration in seconds
let last_rep = 0;     // rep_count frozen at the moment the workout finished
// Shared stats.js instance: attached to the DOM in setupFPS() and fed by the
// per-frame begin()/end() calls inside the detection loop.
const stats = new Stats();
/**
* Loads a the camera to be used in the demo
*
*/
/**
 * Opens the user-facing webcam and wires the stream to the <video id="video">
 * element used by the demo.
 *
 * @returns {Promise<HTMLVideoElement>} resolves once the video metadata has
 *     loaded, i.e. when the intrinsic dimensions are known.
 * @throws {Error} when the browser does not expose
 *     navigator.mediaDevices.getUserMedia.
 */
async function setupCamera() {
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    throw new Error(
        'browser API navigator.mediaDevices.getUserMedia not available');
  }

  const video = document.getElementById('video');
  video.width = videoWidth;
  video.height = videoHeight;

  // On mobile, let the device pick its native resolution; on desktop request
  // the fixed demo size so the canvas overlay lines up with the stream.
  const mobile = isMobile();
  const stream = await navigator.mediaDevices.getUserMedia({
    audio: false,
    video: {
      facingMode: 'user',
      width: mobile ? undefined : videoWidth,
      height: mobile ? undefined : videoHeight,
    },
  });
  video.srcObject = stream;

  return new Promise((resolve) => {
    // BUG FIX: the DOM handler property is all-lowercase
    // ('onloadedmetadata'). The original camel-cased 'onloadedMetadata' is
    // just an unused expando property, so this promise never resolved.
    video.onloadedmetadata = () => {
      resolve(video);
    };
  });
}
/**
 * Sets up the camera and starts playback.
 *
 * @returns {Promise<HTMLVideoElement>} the playing video element.
 */
async function loadVideo() {
  const video = await setupCamera();
  // BUG FIX: play() returns a promise; await it so autoplay rejections
  // surface here instead of becoming an unhandled rejection.
  await video.play();
  return video;
}
// Default PoseNet model configuration: 2-byte weight quantization, a lighter
// depth multiplier on mobile devices, stride 16, and a 500px input resolution.
const defaultQuantBytes = 2;
const defaultMobileNetMultiplier = isMobile() ? 0.50 : 0.75;
const defaultMobileNetStride = 16;
const defaultMobileNetInputResolution = 500;
/**
 * UI state shared between the dat.gui controls and the render loop.
 *
 * BUG FIX: the pasted literal had mismatched braces — multiPoseDetection and
 * output were nested inside singlePoseDetection and the object was never
 * closed (a syntax error). This restores the intended flat structure, which
 * matches how the rest of the file reads it (guiState.multiPoseDetection.*,
 * guiState.output.*, etc.).
 */
const guiState = {
  algorithm: 'multi-pose',
  input: {
    architecture: 'MobileNetV1',
    outputStride: defaultMobileNetStride,
    inputResolution: defaultMobileNetInputResolution,
    multiplier: defaultMobileNetMultiplier,
    quantBytes: defaultQuantBytes,
  },
  activity: {
    Activity: 'Weight Lifting',
    Repetitions: 10,
    Duration_min: 60,
  },
  singlePoseDetection: {
    minPoseConfidence: 0.1,
    minPartConfidence: 0.5,
  },
  multiPoseDetection: {
    maxPoseDetections: 5,
    minPoseConfidence: 0.15,
    minPartConfidence: 0.1,
    nmsRadius: 30.0,
  },
  output: {
    showVideo: true,
    showSkeleton: true,
    showPoints: true,
    showBoundingBox: false,
  },
  net: null,
};
/**
* Sets up dat.gui controller on the top-right of the window
*/
/**
 * Sets up the dat.gui controller on the top-right of the window.
 *
 * @param {Array<MediaDeviceInfo>} cameras - available cameras (may be empty).
 * @param {Object} net - the loaded PoseNet model, stored on guiState.net.
 */
function setupGui(cameras, net) {
  guiState.net = net;

  if (cameras.length > 0) {
    // BUG FIX: MediaDeviceInfo exposes 'deviceId' (camel case);
    // 'deviceid' was always undefined.
    guiState.camera = cameras[0].deviceId;
  }

  const gui = new dat.GUI({width: 300});

  // The single-pose algorithm is faster and simpler but requires only one
  // person to be in the frame or results will be inaccurate. Multi-pose works
  // for more than 1 person.
  const algorithmController =
      gui.add(guiState, 'algorithm', ['single-pose', 'multi-pose']);

  // Workout configuration folder.
  const activity = gui.addFolder('Workout');
  activity.add(
      guiState.activity, 'Activity', ['Weight Lifting', 'Jumping jack']);
  // BUG FIX: the property is 'Activity' (capitalized); the lowercase access
  // always yielded undefined.
  guiState.atividade = guiState.activity.Activity;
  activity.add(guiState.activity, 'Repetitions', 50);
  activity.add(guiState.activity, 'Duration_min', 180).name('Duration (sec)');

  // 'Start Workout' button: resets all counters and arms the state machine
  // read by the detection loop.
  const startButton = {
    add: function() {
      console.log('clicked');
      rep_count = 0;
      count_down = 0;
      wko_started = 1;
      done = 0;
      global_zero();
    },
  };
  activity.add(startButton, 'add').name('Start Workout');
  activity.open();

  const single = gui.addFolder('Single Pose Detection');
  single.add(guiState.singlePoseDetection, 'minPoseConfidence', 0.0, 1.0);
  // BUG FIX: the slider was given only one bound; make the 0..1 range
  // explicit like the line above.
  single.add(guiState.singlePoseDetection, 'minPartConfidence', 0.0, 1.0);

  const multi = gui.addFolder('Multi Pose Detection');
  multi.add(guiState.multiPoseDetection, 'maxPoseDetections')
      .min(1)
      .max(20)
      .step(1);
  // BUG FIX: the property-name arguments were missing entirely, so these
  // controllers were never created.
  multi.add(guiState.multiPoseDetection, 'minPoseConfidence', 0.0, 1.0);
  multi.add(guiState.multiPoseDetection, 'minPartConfidence', 0.0, 1.0);
  // nmsRadius: controls the minimum distance between poses that are returned;
  // defaults to 20, which is probably fine for most use cases.
  multi.add(guiState.multiPoseDetection, 'nmsRadius').min(0.0).max(40.0);
  multi.open();

  const output = gui.addFolder('Output');
  output.add(guiState.output, 'showVideo');
  output.add(guiState.output, 'showSkeleton');
  output.add(guiState.output, 'showPoints');
  output.add(guiState.output, 'showBoundingBox');
  output.open();

  // Show only the folder that matches the selected algorithm.
  algorithmController.onChange(function(value) {
    switch (guiState.algorithm) {
      case 'single-pose':
        multi.close();
        single.open();
        break;
      case 'multi-pose':
        single.close();
        multi.open();
        break;
    }
  });
}
/**
* Sets up a frames per second panel on the top-left of the window
*/
/**
 * Sets up a frames-per-second panel on the top-left of the window.
 */
function setupFPS() {
  stats.showPanel(0);  // panel 0 = fps (1: ms, 2: mb, 3+: custom)
  const container = document.getElementById('main');
  container.appendChild(stats.dom);
}
/**
* Feeds an image to posenet to estimate poses - this is where the magic
* happens. This function loops with a requestAnimationFrame method.
*/
/**
 * Feeds webcam frames to PoseNet, draws poses and workout overlays, and
 * loops itself with requestAnimationFrame.
 *
 * BUG FIXES in this block: the multi-pose estimatePoses options object was
 * missing its opening brace (syntax error) and the flipHorizontal option;
 * clearRect/drawImage were missing arguments; several fillText calls had lost
 * their y coordinate in the paste (reconstructed — see NOTE comments).
 *
 * @param {HTMLVideoElement} video - the playing webcam video.
 * @param {Object} net - the loaded PoseNet model (also kept on guiState.net).
 */
function detectPoseInRealTime(video, net) {
  const canvas = document.getElementById('output');
  const ctx = canvas.getContext('2d');

  // Since images are being fed from a webcam, we want to feed in the original
  // image and then just flip the keypoints' x coordinates. If instead we flip
  // the image, then correcting left-right keypoint pairs requires a
  // permutation on all the keypoints.
  const flipPoseHorizontal = true;

  canvas.width = videoWidth;
  canvas.height = videoHeight;

  async function poseDetectionFrame() {
    // The changeto* fields are set by (currently removed) GUI handlers when
    // the user picks a new model configuration; reload the model lazily here.
    if (guiState.changetoArchitecture) {
      // Important to purge variables and free up GPU memory
      guiState.net.dispose();
      toggleLoadingUI(true);
      guiState.net = await posenet.load({
        architecture: guiState.changetoArchitecture,
        outputStride: guiState.outputStride,
        inputResolution: guiState.inputResolution,
        multiplier: guiState.multiplier,
      });
      toggleLoadingUI(false);
      guiState.architecture = guiState.changetoArchitecture;
      guiState.changetoArchitecture = null;
    }
    if (guiState.changetoMultiplier) {
      guiState.net.dispose();
      toggleLoadingUI(true);
      guiState.net = await posenet.load({
        architecture: guiState.architecture,
        multiplier: +guiState.changetoMultiplier,
        quantBytes: guiState.quantBytes,
      });
      toggleLoadingUI(false);
      guiState.multiplier = +guiState.changetoMultiplier;
      guiState.changetoMultiplier = null;
    }
    if (guiState.changetoOutputStride) {
      // Important to purge variables and free up GPU memory
      guiState.net.dispose();
      toggleLoadingUI(true);
      guiState.net = await posenet.load({
        architecture: guiState.architecture,
        outputStride: +guiState.changetoOutputStride,
        quantBytes: guiState.quantBytes,
      });
      toggleLoadingUI(false);
      guiState.outputStride = +guiState.changetoOutputStride;
      guiState.changetoOutputStride = null;
    }
    if (guiState.changetoInputResolution) {
      // Important to purge variables and free up GPU memory
      guiState.net.dispose();
      toggleLoadingUI(true);
      guiState.net = await posenet.load({
        architecture: guiState.architecture,
        inputResolution: +guiState.changetoInputResolution,
        quantBytes: guiState.quantBytes,
      });
      toggleLoadingUI(false);
      guiState.inputResolution = +guiState.changetoInputResolution;
      guiState.changetoInputResolution = null;
    }
    if (guiState.changetoQuantBytes) {
      // Important to purge variables and free up GPU memory
      guiState.net.dispose();
      toggleLoadingUI(true);
      guiState.net = await posenet.load({
        architecture: guiState.architecture,
        quantBytes: guiState.changetoQuantBytes,
      });
      toggleLoadingUI(false);
      guiState.quantBytes = guiState.changetoQuantBytes;
      guiState.changetoQuantBytes = null;
    }

    // Begin monitoring code for frames per second
    stats.begin();

    let poses = [];
    let minPoseConfidence;
    let minPartConfidence;
    switch (guiState.algorithm) {
      case 'single-pose':
        const pose = await guiState.net.estimatePoses(video, {
          flipHorizontal: flipPoseHorizontal,
          decodingMethod: 'single-person',
        });
        poses = poses.concat(pose);
        minPoseConfidence = +guiState.singlePoseDetection.minPoseConfidence;
        minPartConfidence = +guiState.singlePoseDetection.minPartConfidence;
        break;
      case 'multi-pose':
        // BUG FIX: the options literal was missing '{' and flipHorizontal.
        let all_poses = await guiState.net.estimatePoses(video, {
          flipHorizontal: flipPoseHorizontal,
          decodingMethod: 'multi-person',
          maxDetections: guiState.multiPoseDetection.maxPoseDetections,
          scoreThreshold: guiState.multiPoseDetection.minPartConfidence,
          nmsRadius: guiState.multiPoseDetection.nmsRadius,
        });
        poses = poses.concat(all_poses);
        minPoseConfidence = +guiState.multiPoseDetection.minPoseConfidence;
        minPartConfidence = +guiState.multiPoseDetection.minPartConfidence;
        break;
    }

    // BUG FIX: clearRect takes (x, y, w, h); the y argument was missing.
    ctx.clearRect(0, 0, videoWidth, videoHeight);

    if (guiState.output.showVideo) {
      ctx.save();
      // Mirror the frame so the on-screen image behaves like a mirror.
      ctx.scale(-1, 1);
      ctx.translate(-videoWidth, 0);
      // BUG FIX: drawImage was missing its position/size arguments.
      ctx.drawImage(video, 0, 0, videoWidth, videoHeight);
      ctx.restore();
    }

    // For each pose (i.e. person) detected in an image, loop through the
    // poses and draw the resulting skeleton and keypoints if over certain
    // confidence scores.
    poses.forEach(({score, keypoints}) => {
      if (score >= minPoseConfidence) {
        if (guiState.output.showPoints) {
          console.log('Repetitions: ', rep_count);
          // drawKeypoints returns the rep increment detected this frame.
          rep_count = rep_count +
              drawKeypoints(
                  keypoints, minPartConfidence, ctx, rep_count, wko_started,
                  guiState.activity.Activity);
          if (wko_started == 1 && done == 0) {
            ctx.font = '25px Arial';
            ctx.fillText('Repetitions: ' + Math.round(rep_count / 2), 10, 90);
            ctx.fillText(
                guiState.activity.Activity + ' workout has started', 100, 490);
            if (timer == 0) {
              // 5-second countdown before the workout clock starts.
              if (count_down == 0) {
                t0 = new Date() / 1000;
                count_down = 1;
              }
              ctx.font = '100px Arial';
              var Now = new Date() / 1000;
              if (Now - t0 <= 5) {
                // NOTE(review): the y coordinate was lost in the paste;
                // 250,250 roughly centres the countdown — confirm layout.
                ctx.fillText(5 - Math.round(Now - t0), 250, 250);
                Now = new Date() / 1000;
              } else {
                timer = 1;
                t0 = new Date() / 1000;
                rep_count = 0;
              }
            } else {
              act_dur = Math.round(new Date() / 1000 - t0);
              console.log('Actual duration: ', act_dur);
              console.log(
                  'guiState duration: ', guiState.activity.Duration_min);
              console.log(
                  'Equal? ', act_dur == guiState.activity.Duration_min);
              // NOTE(review): y coordinate reconstructed — confirm layout.
              ctx.fillText('Duration: ' + act_dur + 's', 430, 90);
              // Weight lifting ends on a rep target; jumping jacks end on
              // elapsed time.
              if (guiState.activity.Activity == 'Weight Lifting' &&
                  Math.round(rep_count / 2) == guiState.activity.Repetitions) {
                done = 1;
              } else if (
                  guiState.activity.Activity == 'Jumping jack' &&
                  act_dur == guiState.activity.Duration_min) {
                done = 1;
              }
            }
          } else if (done == 1) {
            // Freeze the final rep count once, then stop the workout.
            if (wko_started == 1) {
              last_rep = rep_count;
            }
            wko_started = 0;
            ctx.font = '45px Arial';
            // NOTE(review): fillStyle is never restored, so later frames keep
            // drawing green text — confirm whether that is intended.
            ctx.fillStyle = 'green';
            // NOTE(review): y coordinate reconstructed — confirm layout.
            ctx.fillText('DONE!!', 210, 250);
            if (guiState.activity.Activity == 'Weight Lifting') {
              ctx.fillText('Duration: ' + act_dur + 's', 160, 300);
              rep_count = 0;
              global_zero();
            } else if (guiState.activity.Activity == 'Jumping jack') {
              // NOTE(review): coordinates reconstructed — confirm layout.
              ctx.fillText(
                  'Repetitions: ' + Math.round(last_rep / 2), 160, 300);
              rep_count = 0;
              global_zero();
            }
            timer = 0;
          } else if (rep_count != guiState.activity.Repetitions || done == 0) {
            // Idle state: no workout running, keep the counters zeroed.
            wko_started = 0;
            rep_count = 0;
            global_zero();
            ctx.font = '29px Arial';
            // BUG FIX: typo 'plase' -> 'please' in the user-facing message.
            // NOTE(review): y coordinate reconstructed — confirm layout.
            ctx.fillText(
                'Workout not started, please start a new train...', 30, 250);
          }
        }
        if (guiState.output.showSkeleton) {
          drawSkeleton(keypoints, ctx);
        }
        if (guiState.output.showBoundingBox) {
          drawBoundingBox(keypoints, ctx);
        }
      }
    });

    // End monitoring code for frames per second
    stats.end();

    requestAnimationFrame(poseDetectionFrame);
  }

  poseDetectionFrame();
}
/**
* Kicks off the demo by loading the posenet model,finding and loading
* available camera devices,and setting off the detectPoseInRealTime function.
*/
/**
 * Kicks off the demo: loads the PoseNet model, opens and starts the camera,
 * builds the GUI, and enters the real-time detection loop.
 *
 * @throws rethrows any camera-setup failure after showing an error message.
 */
export async function bindPage() {
  toggleLoadingUI(true);
  const net = await posenet.load({
    architecture: guiState.input.architecture,
    outputStride: guiState.input.outputStride,
    inputResolution: guiState.input.inputResolution,
    multiplier: guiState.input.multiplier,
    quantBytes: guiState.input.quantBytes,
  });
  toggleLoadingUI(false);

  let video;
  try {
    video = await loadVideo();
  } catch (e) {
    // Surface a human-readable message when camera access fails, then rethrow
    // so the failure is still visible on the console.
    const info = document.getElementById('info');
    info.textContent = 'this browser does not support video capture,' +
        'or this device does not have a camera';
    info.style.display = 'block';
    throw e;
  }

  setupGui([], net);
  // setupFPS();
  detectPoseInRealTime(video, net);
}
// Legacy getUserMedia shim for older browsers that only expose the
// vendor-prefixed variants.
navigator.getUserMedia = navigator.getUserMedia ||
navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
// kick off the demo
bindPage();
解决方法
暂未找到可以解决该程序问题的有效方法，小编正在努力寻找整理中！
如果你已经找到好的解决方法,欢迎将解决方案带上本链接一起发送给小编。
小编邮箱:dio#foxmail.com (将#修改为@)