言語切り替え

頭の傾きに合わせてサーボモーターを動かす

頭を傾けた時、同じ方向にサーボモーターを動かします。

頭の傾きを検出するにはTensorFlow.jsのPoseNetという機械学習ライブラリを使用します。

用意するもの

配線

プログラム

<!DOCTYPE html>
<html>

<head>
  <meta charset="utf-8">
  <!-- TensorFlow.js runtime and the PoseNet pose-estimation model (CDN builds) -->
  <script src="https://unpkg.com/@tensorflow/tfjs"></script>
  <script src="https://unpkg.com/@tensorflow-models/posenet"></script>
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <!-- jQuery and the obniz hardware SDK, pinned to obniz 3.2.0 -->
  <script src="https://obniz.io/js/jquery-3.2.1.min.js"></script>
  <script src="https://unpkg.com/obniz@3.2.0/obniz.js" crossorigin="anonymous"></script>
  <style>
    /* Stack the drawing canvas directly on top of the video inside #wrapper;
       pointer-events:none lets clicks pass through to the video element. */
    #canvas{
      position: absolute;
      top: 0;
      left: 0;
      pointer-events: none;
    }

    #wrapper{
      position: relative;
    }
  </style>
</head>
<body>
  <!-- Camera preview plus a same-sized (800x450) overlay canvas for the pose skeleton -->
  <div id="wrapper">
    <video id="video" width="800px" height="450px" autoplay="1"></video>
    <canvas id="canvas" width="800px" height="450px"></canvas>
  </div>
</body>
<script>
  // Overlay canvas on which the detected pose skeleton is drawn.
  const canvas = document.getElementById("canvas");
  const ctx = canvas.getContext("2d");
  ctx.strokeStyle = "#00ff33"; // green stroke for the skeleton lines

  // obniz device handle; replace OBNIZ_ID_HERE with the real device id.
  const obniz = new Obniz("OBNIZ_ID_HERE");
  // When the obniz device connects: load PoseNet, open the camera, then run a
  // once-per-second loop that maps head tilt to the servo angle.
  obniz.onconnect = async function () {
    // Servo wired to obniz pins: GND=0, VCC=1, signal=2.
    const servo = obniz.wired("ServoMotor", { gnd: 0, vcc: 1, signal: 2 });

    // PoseNet inference parameters (smaller scale factor = faster, less accurate).
    const imageScaleFactor = 0.2;
    const outputStride = 16;
    // NOTE(review): removed unused contentWidth/contentHeight locals (the 600px
    // height also contradicted the 450px canvas in the markup).

    bindPage();

    // Load the PoseNet model and the camera stream, then start detection.
    async function bindPage() {
      const net = await posenet.load();
      let video;
      try {
        video = await loadVideo();
      } catch (e) {
        console.error(e);
        return;
      }
      detectPoseInRealTime(video, net);
    }

    // Start playback of the camera stream and return the <video> element.
    async function loadVideo() {
      const video = await setupCamera();
      video.play();
      return video;
    }

    // Attach the user's camera to the <video> element. Resolves once the stream
    // metadata is loaded; alerts and rejects when no camera API is available.
    async function setupCamera() {
      const video = document.getElementById('video');
      if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        const stream = await navigator.mediaDevices.getUserMedia({
          audio: false,
          video: true,
        });
        video.srcObject = stream;
        return new Promise((resolve) => {
          video.onloadedmetadata = () => resolve(video);
        });
      }
      const errorMessage = "This browser does not support video capture, or this device does not have a camera";
      alert(errorMessage);
      return Promise.reject(errorMessage);
    }

    // Estimate a single pose roughly once per second and drive the servo.
    function detectPoseInRealTime(video, net) {
      const flipHorizontal = true; // mirror the image like a webcam preview
      async function poseDetectionFrame() {
        const pose = await net.estimateSinglePose(
          video, imageScaleFactor, flipHorizontal, outputStride);
        calcAngle([pose]);
        // FIX: was `setTimeout(arguments.callee, 1000)` — arguments.callee is
        // deprecated and throws in strict mode; use the function's own name.
        setTimeout(poseDetectionFrame, 1000);
      }
      poseDetectionFrame();
    }

    // Compute head tilt as the angle of the line from the shoulder midpoint to
    // the nose (keypoints: 0 = nose, 5/6 = shoulders), then move the servo by
    // twice the deviation from the neutral 90° position.
    function calcAngle(poses) {
      const nose = poses[0].keypoints[0].position;
      const shoulderLeft = poses[0].keypoints[5].position;
      const shoulderRight = poses[0].keypoints[6].position;
      const center = {
        x: (shoulderLeft.x + shoulderRight.x) / 2,
        y: (shoulderLeft.y + shoulderRight.y) / 2,
      };
      draw(nose, shoulderLeft, shoulderRight, center);

      // 90° when the head is upright; negated because canvas y grows downward.
      const angle = -Math.atan2(nose.y - center.y, nose.x - center.x) * 180 / Math.PI;
      console.log(angle);

      // FIX: the original branches disagreed — `(angle-90)*2 + angle` (= 3a-180)
      // for angle > 90 vs `90 - (90-angle)*2` (= 2a-90) otherwise. Both intend
      // "double the deviation from 90°", which is 2*angle - 90 uniformly.
      // Clamp to the servo's valid 0–180° range before commanding it.
      const amplified = 2 * angle - 90;
      servo.angle(Math.min(180, Math.max(0, amplified)));
    }

    // Draw the shoulder line and the shoulder-center→nose line on the overlay.
    function draw(nose, shoulderLeft, shoulderRight, center) {
      ctx.clearRect(0, 0, canvas.width, canvas.height);
      ctx.beginPath();
      ctx.moveTo(shoulderLeft.x, shoulderLeft.y);
      ctx.lineTo(shoulderRight.x, shoulderRight.y);
      ctx.moveTo(center.x, center.y);
      ctx.lineTo(nose.x, nose.y);
      ctx.closePath();
      ctx.stroke();
    }
  }

</script>
</html>

参考サイト

うごかす

関連記事