
Visualizing Magnetic Compass Calibration Data

I read the magnetic compass data (x, y, z) from the 9-axis BMX055 sensor and visualized it. The sensor is connected to a Raspberry Pi over I2C.

A 3D plot of the data in a Jupyter notebook

The red dot marks the mean of the x, y, and z values.

Acquiring the data from the BMX055

# -*- coding: utf-8 -*-
#
# https://taku-info.com/bmx055howtouse-mag/
#

from smbus import SMBus
import time
import math
import datetime
import csv

# I2C
ACCL_ADDR = 0x19
ACCL_R_ADDR = 0x02
GYRO_ADDR = 0x69
GYRO_R_ADDR = 0x02
MAG_ADDR = 0x13
MAG_R_ADDR = 0x42

i2c = SMBus(1)

def bmx_setup():
    # acc_data_setup: configure the accelerometer (range, bandwidth, power mode)
    i2c.write_byte_data(ACCL_ADDR, 0x0F, 0x03)
    i2c.write_byte_data(ACCL_ADDR, 0x10, 0x08)
    i2c.write_byte_data(ACCL_ADDR, 0x11, 0x00)
    time.sleep(0.5)

    # gyr_data_setup: configure the gyroscope (range, bandwidth, power mode)
    i2c.write_byte_data(GYRO_ADDR, 0x0F, 0x04)
    i2c.write_byte_data(GYRO_ADDR, 0x10, 0x07)
    i2c.write_byte_data(GYRO_ADDR, 0x11, 0x00)
    time.sleep(0.5)

    # mag_data_setup: configure the magnetometer (power-on, data rate, axes, repetitions)
    data = i2c.read_byte_data(MAG_ADDR, 0x4B)
    if(data == 0):
        i2c.write_byte_data(MAG_ADDR, 0x4B, 0x83)
        time.sleep(0.5)
    i2c.write_byte_data(MAG_ADDR, 0x4B, 0x01)
    i2c.write_byte_data(MAG_ADDR, 0x4C, 0x00)
    i2c.write_byte_data(MAG_ADDR, 0x4E, 0x84)
    i2c.write_byte_data(MAG_ADDR, 0x51, 0x04)
    i2c.write_byte_data(MAG_ADDR, 0x52, 0x16)
    time.sleep(0.5)

def acc_value():
    data = [0, 0, 0, 0, 0, 0]
    acc_data = [0.0, 0.0, 0.0]

    try:
        for i in range(6):
            data[i] = i2c.read_byte_data(ACCL_ADDR, ACCL_R_ADDR + i)

        for i in range(3):
            # 12-bit signed value; 0.0098 converts LSB to m/s^2 (±2g range)
            acc_data[i] = ((data[2*i + 1] * 256) + int(data[2*i] & 0xF0)) / 16
            if acc_data[i] > 2047:
                acc_data[i] -= 4096
            acc_data[i] *= 0.0098

    except IOError as e:
        print("I/O error({0}): {1}".format(e.errno, e.strerror))

    return acc_data

def gyro_value():
    data = [0, 0, 0, 0, 0, 0]
    gyro_data = [0.0, 0.0, 0.0]

    try:
        for i in range(6):
            data[i] = i2c.read_byte_data(GYRO_ADDR, GYRO_R_ADDR + i)

        for i in range(3):
            # 16-bit signed value; 0.0038 converts LSB to deg/s (±125 dps range)
            gyro_data[i] = (data[2*i + 1] * 256) + data[2*i]
            if gyro_data[i] > 32767:
                gyro_data[i] -= 65536
            gyro_data[i] *= 0.0038

    except IOError as e:
        print("I/O error({0}): {1}".format(e.errno, e.strerror))

    return gyro_data

def mag_value():
    data = [0, 0, 0, 0, 0, 0, 0, 0]
    mag_data = [0.0, 0.0, 0.0]

    try:
        for i in range(8):
            data[i] = i2c.read_byte_data(MAG_ADDR, MAG_R_ADDR + i)

        for i in range(3):
            if i != 2:
                # X/Y axes: 13-bit signed values
                mag_data[i] = ((data[2*i + 1] * 256) + (data[2*i] & 0xF8)) / 8
                if mag_data[i] > 4095:
                    mag_data[i] -= 8192
            else:
                # Z axis: 15-bit signed value
                mag_data[i] = ((data[2*i + 1] * 256) + (data[2*i] & 0xFE)) / 2
                if mag_data[i] > 16383:
                    mag_data[i] -= 32768

    except IOError as e:
        print("I/O error({0}): {1}".format(e.errno, e.strerror))

    return mag_data

if __name__ == "__main__":

    bmx_setup()
    time.sleep(0.1)
    now_time = datetime.datetime.now()
    filename = 'test_' + now_time.strftime('%Y%m%d_%H%M%S') + '.csv'
    # create the file and write the header (column) row
    with open(filename, 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['Mag_x', 'Mag_y', 'Mag_z'])
    while True:
        #acc = acc_value()
        #gyro= gyro_value()
        mag = mag_value()
        theta = math.degrees(math.atan2(mag[1], mag[0]))  # heading angle in degrees
        if theta < 0:
            theta = theta + 360.0
        '''
        theta = 360.0 - theta
        print("Accl -> x:{}, y:{}, z: {}".format(acc[0], acc[1], acc[2]))
        print("Gyro -> x:{}, y:{}, z: {}".format(gyro[0], gyro[1], gyro[2]))
        print("Mag -> x:{}, y:{}, z: {}".format(mag[0], mag[1], mag[2]))
        '''
        print(theta)
        time.sleep(0.02)
        with open(filename, 'a', newline="") as f:
            writer = csv.writer(f)
            writer.writerow([mag[0], mag[1], mag[2]])

Visualizing the acquired data in a Jupyter notebook

%matplotlib nbagg

import os
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import math
import numpy as np

### Load the data
df = pd.read_csv('BMX055/data4.csv')
print(df)
a_x=np.average(df['Mag_x'])
a_y=np.average(df['Mag_y'])
a_z=np.average(df['Mag_z'])
print(round(a_x,2),round(a_y,2),round(a_z,2))
# Plotting starts here

# Prepare the figure.
fig = plt.figure()
#ax = Axes3D(fig)    <--- replaced with add_subplot to avoid a deprecation warning
ax = fig.add_subplot(111, projection='3d')
# Set the axis labels.
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')

# Plot the data points and the mean point, then show the figure.
ax.scatter3D(df['Mag_x'],df['Mag_y'],df['Mag_z'],color="blue")
ax.scatter3D(a_x,a_y,a_z,color="red")
plt.show()
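
Since the aim of this post is compass calibration, a natural next step is hard-iron offset correction. The sketch below is my own addition rather than part of the original notebook: assuming the same CSV columns, it estimates the offset of each axis as the midpoint of that axis' min/max and re-plots the centered point cloud.

# Hard-iron offset sketch (assumes the CSV columns used above).
# If only hard-iron distortion is present, subtracting the per-axis
# midpoint should center the point cloud on the origin.
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # needed for projection='3d' on older matplotlib

df = pd.read_csv('BMX055/data4.csv')
offset = {c: (df[c].max() + df[c].min()) / 2 for c in ['Mag_x', 'Mag_y', 'Mag_z']}
print('hard-iron offset:', offset)

corrected = df - pd.Series(offset)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.scatter3D(corrected['Mag_x'], corrected['Mag_y'], corrected['Mag_z'], color="blue")
ax.scatter3D(0, 0, 0, color="red")  # the corrected cloud should center here
plt.show()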

RPA (Robotic Process Automation)

[Web Scraping Primer] Completely master the basics in 2 hours! An introduction to web scraping with Python (combined edition)
https://www.youtube.com/watch?v=VRFfAeW30qE

[Must-see for beginners] The first video to watch if you want to implement demand forecasting on real data with Python
https://www.youtube.com/watch?v=uKq_dgEUVfA&list=RDCMUC0xRMqPOyRNPTaL6BxhbCnQ&index=11

[Python x Automation] A clear 40-minute guide to automating PC operations with PyAutoGUI
https://www.youtube.com/watch?v=zmrbS98KXyo&list=RDCMUC0xRMqPOyRNPTaL6BxhbCnQ&index=10

A clear walkthrough of building and publishing a face-detection app with Streamlit, the Python library currently in the spotlight
https://www.youtube.com/watch?v=zpBjbK6jic0

Installing Kubernetes on a Rock Pi 4 following the Raspberry Pi procedure

Installation steps referenced from: "Building a Kubernetes Cluster on a Raspberry Pi"

Target environment

  • rock pi4 4GB RAM
  • Linux rock 4.4.154-110-rockchip-gcef30e88a9f5 #1 SMP Mon Jun 22 07:37:10 UTC 2020 aarch64 aarch64 aarch64 GNU/Linux
  • Ubuntu 18.04.5 LTS (Bionic Beaver)

Install kubelet, kubeadm, and kubectl with the following steps

$ sudo -s
# curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
OK
# cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
> deb https://apt.kubernetes.io/ kubernetes-xenial main
> EOF
# apt-get update
# apt-get install -y kubelet kubeadm kubectl
#  kubeadm version -o yaml
clientVersion:
  buildDate: "2021-06-16T12:57:56Z"
  compiler: gc
  gitCommit: 092fbfbf53427de67cac1e9fa54aaa09a28371d7
  gitTreeState: clean
  gitVersion: v1.21.2
  goVersion: go1.16.5
  major: "1"
  minor: "21"
  platform: linux/arm64
#  cat /proc/sys/net/bridge/bridge-nf-call-iptables
1
$ kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.2", GitCommit:"092fbfbf53427de67cac1e9fa54aaa09a28371d7", GitTreeState:"clean", BuildDate:"2021-06-16T12:57:56Z", GoVersion:"go1.16.5", Compiler:"gc", Platform:"linux/arm64"}
# swapoff -a
# kubeadm init --pod-network-cidr=10.244.0.0/16
[init] Using Kubernetes version: v1.21.2
(snip)
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.68.111:6443 --token iuzu6k.j2arujghto188qq1 \
        --discovery-token-ca-cert-hash sha256:bed560334a382d997a48491083e569dbaaac8b1a6d8804c9b917b8596d36b255
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config

$  kubectl get node
The connection to the server 127.0.0.1:16443 was refused - did you specify the right host or port?


Trying microk8s (Helm edition)

I installed Helm, using "Deploying a WordPress Helm Chart" as a reference.

The article "Helm v3 no Susume" (an encouragement of Helm v3) may be more practical?

The Helm installation method differs, but the subsequent steps appear to be almost the same (the following assumes installation via enable helm3).

A command such as $ helm search hub prometheus should be read as
$ microk8s.helm3 search hub prometheus

Add repositories

$ microk8s.helm3 repo add stable https://charts.helm.sh/stable
$ microk8s.helm3 repo add bitnami https://charts.bitnami.com/bitnami

Listing the added repositories

$ microk8s helm3 repo list
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /var/snap/microk8s/2265/credentials/client.config
NAME    URL
stable  https://charts.helm.sh/stable
bitnami https://charts.bitnami.com/bitnami

Searching for charts in the repositories

$ microk8s helm3 search repo wordpress
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /var/snap/microk8s/2265/credentials/client.config
NAME                    CHART VERSION   APP VERSION     DESCRIPTION
bitnami/wordpress       11.0.16         5.7.2           Web publishing platform for building blogs and ...
stable/wordpress        9.0.3           5.3.2           DEPRECATED Web publishing platform for building...
  • helm search hub searches for charts on Helm Hub.
  • The --version argument of helm install lets you specify a chart version, so any version of a chart can be deployed. The helm pull command downloads a chart locally.
    A downloaded chart can be edited as you like before deploying, which lets you change settings that the chart's parameters do not expose.

Deploying an application

# create the namespace
$ kubectl create namespace helm-test
# dry-run
$ helm install test stable/prometheus --namespace helm-test --dry-run
# deploy (Helm v3 syntax; the v2-style --name flag from the referenced article does not apply)
$ helm install test stable/prometheus --namespace helm-test
# verify
$ helm list -n helm-test
$ kubectl get po -n helm-test

Output of the dry run

$ microk8s helm3 install test bitnami/wordpress --namespace helm-test --dry-run
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /var/snap/microk8s/2265/credentials/client.config
NAME: test
LAST DEPLOYED: Tue Jun 22 10:03:29 2021
NAMESPACE: helm-test
STATUS: pending-install
REVISION: 1
HOOKS:
---
# Source: wordpress/templates/tests/test-mariadb-connection.yaml
apiVersion: v1
kind: Pod
metadata:
  name: "test-credentials-test"
  annotations:
    "helm.sh/hook": test-success
spec:
  securityContext:
    fsGroup: 1001
  containers:
    - name: test-credentials-test
      image: docker.io/bitnami/wordpress:5.7.2-debian-10-r25
      imagePullPolicy: "IfNotPresent"
      securityContext:
        runAsNonRoot: true
        runAsUser: 1001
      env:
        - name: MARIADB_HOST
          value: "test-mariadb"
        - name: MARIADB_PORT
          value: "3306"
        - name: WORDPRESS_DATABASE_NAME
          value: "bitnami_wordpress"
        - name: WORDPRESS_DATABASE_USER
          value: "bn_wordpress"
        - name: WORDPRESS_DATABASE_PASSWORD
          valueFrom:
            secretKeyRef:
              name: test-mariadb
              key: mariadb-password
      command:
        - /bin/bash
        - -ec
        - |
          mysql --host=$MARIADB_HOST --port=$MARIADB_PORT --user=$WORDPRESS_DATABASE_USER --password=$WORDPRESS_DATABASE_PASSWORD
  restartPolicy: Never
MANIFEST:
---
# Source: wordpress/charts/mariadb/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: test-mariadb
  namespace: helm-test
  labels:
    app.kubernetes.io/name: mariadb
    helm.sh/chart: mariadb-9.3.14
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
  annotations:
---
# Source: wordpress/charts/mariadb/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  name: test-mariadb
  namespace: helm-test
  labels:
    app.kubernetes.io/name: mariadb
    helm.sh/chart: mariadb-9.3.14
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
type: Opaque
data:
  mariadb-root-password: "eGMyb0NNWXZVUg=="
  mariadb-password: "SFc1WlkwNWpsMw=="
---
# Source: wordpress/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  name: test-wordpress
  namespace: "helm-test"
  labels:
    app.kubernetes.io/name: wordpress
    helm.sh/chart: wordpress-11.0.16
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
type: Opaque
data:
  wordpress-password: "TVJteHlENlFtQQ=="
---
# Source: wordpress/charts/mariadb/templates/primary/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-mariadb
  namespace: helm-test
  labels:
    app.kubernetes.io/name: mariadb
    helm.sh/chart: mariadb-9.3.14
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: primary
data:
  my.cnf: |-
    [mysqld]
    skip-name-resolve
    explicit_defaults_for_timestamp
    basedir=/opt/bitnami/mariadb
    plugin_dir=/opt/bitnami/mariadb/plugin
    port=3306
    socket=/opt/bitnami/mariadb/tmp/mysql.sock
    tmpdir=/opt/bitnami/mariadb/tmp
    max_allowed_packet=16M
    bind-address=0.0.0.0
    pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid
    log-error=/opt/bitnami/mariadb/logs/mysqld.log
    character-set-server=UTF8
    collation-server=utf8_general_ci

    [client]
    port=3306
    socket=/opt/bitnami/mariadb/tmp/mysql.sock
    default-character-set=UTF8
    plugin_dir=/opt/bitnami/mariadb/plugin

    [manager]
    port=3306
    socket=/opt/bitnami/mariadb/tmp/mysql.sock
    pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid
---
# Source: wordpress/templates/pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-wordpress
  namespace: "helm-test"
  labels:
    app.kubernetes.io/name: wordpress
    helm.sh/chart: wordpress-11.0.16
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
spec:
  accessModes:
    - "ReadWriteOnce"
  resources:
    requests:
      storage: "10Gi"
---
# Source: wordpress/charts/mariadb/templates/primary/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: test-mariadb
  namespace: helm-test
  labels:
    app.kubernetes.io/name: mariadb
    helm.sh/chart: mariadb-9.3.14
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: primary
  annotations:
spec:
  type: ClusterIP
  ports:
    - name: mysql
      port: 3306
      protocol: TCP
      targetPort: mysql
      nodePort: null
  selector:
    app.kubernetes.io/name: mariadb
    app.kubernetes.io/instance: test
    app.kubernetes.io/component: primary
---
# Source: wordpress/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: test-wordpress
  namespace: "helm-test"
  labels:
    app.kubernetes.io/name: wordpress
    helm.sh/chart: wordpress-11.0.16
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
spec:
  type: LoadBalancer
  externalTrafficPolicy: "Cluster"
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: http
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
  selector:
    app.kubernetes.io/name: wordpress
    app.kubernetes.io/instance: test
---
# Source: wordpress/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-wordpress
  namespace: "helm-test"
  labels:
    app.kubernetes.io/name: wordpress
    helm.sh/chart: wordpress-11.0.16
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: wordpress
      app.kubernetes.io/instance: test
  strategy:
    rollingUpdate: {}
    type: RollingUpdate
  replicas: 1
  template:
    metadata:
      labels:
        app.kubernetes.io/name: wordpress
        helm.sh/chart: wordpress-11.0.16
        app.kubernetes.io/instance: test
        app.kubernetes.io/managed-by: Helm
    spec:

      serviceAccountName: default
      # yamllint disable rule:indentation
      hostAliases:
        - hostnames:
          - status.localhost
          ip: 127.0.0.1
      # yamllint enable rule:indentation
      affinity:
        podAffinity:

        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: wordpress
                    app.kubernetes.io/instance: test
                namespaces:
                  - "helm-test"
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:

      securityContext:
        fsGroup: 1001
      containers:
        - name: wordpress
          image: docker.io/bitnami/wordpress:5.7.2-debian-10-r25
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsNonRoot: true
            runAsUser: 1001
          env:
            - name: ALLOW_EMPTY_PASSWORD
              value: "yes"
            - name: MARIADB_HOST
              value: "test-mariadb"
            - name: MARIADB_PORT_NUMBER
              value: "3306"
            - name: WORDPRESS_DATABASE_NAME
              value: "bitnami_wordpress"
            - name: WORDPRESS_DATABASE_USER
              value: "bn_wordpress"
            - name: WORDPRESS_DATABASE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: test-mariadb
                  key: mariadb-password
            - name: WORDPRESS_USERNAME
              value: "user"
            - name: WORDPRESS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: test-wordpress
                  key: wordpress-password
            - name: WORDPRESS_EMAIL
              value: "user@example.com"
            - name: WORDPRESS_FIRST_NAME
              value: "FirstName"
            - name: WORDPRESS_LAST_NAME
              value: "LastName"
            - name: WORDPRESS_HTACCESS_OVERRIDE_NONE
              value: "no"
            - name: WORDPRESS_ENABLE_HTACCESS_PERSISTENCE
              value: "no"
            - name: WORDPRESS_BLOG_NAME
              value: "User's Blog!"
            - name: WORDPRESS_SKIP_BOOTSTRAP
              value: "no"
            - name: WORDPRESS_TABLE_PREFIX
              value: "wp_"
            - name: WORDPRESS_SCHEME
              value: "http"
            - name: WORDPRESS_EXTRA_WP_CONFIG_CONTENT
              value:
            - name: WORDPRESS_AUTO_UPDATE_LEVEL
              value: "none"
            - name: WORDPRESS_PLUGINS
              value: "none"
          envFrom:
          ports:
            - name: http
              containerPort: 8080
            - name: https
              containerPort: 8443
          livenessProbe:
            failureThreshold: 6
            httpGet:
              httpHeaders: []
              path: /wp-admin/install.php
              port: http
              scheme: HTTP
            initialDelaySeconds: 120
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          readinessProbe:
            failureThreshold: 6
            httpGet:
              httpHeaders: []
              path: /wp-login.php
              port: http
              scheme: HTTP
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          resources:
            limits: {}
            requests:
              cpu: 300m
              memory: 512Mi
          volumeMounts:
            - mountPath: /bitnami/wordpress
              name: wordpress-data
              subPath: wordpress
      volumes:
        - name: wordpress-data
          persistentVolumeClaim:
            claimName: test-wordpress
---
# Source: wordpress/charts/mariadb/templates/primary/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: test-mariadb
  namespace: helm-test
  labels:
    app.kubernetes.io/name: mariadb
    helm.sh/chart: mariadb-9.3.14
    app.kubernetes.io/instance: test
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: primary
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/name: mariadb
      app.kubernetes.io/instance: test
      app.kubernetes.io/component: primary
  serviceName: test-mariadb
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/configuration: ba8296f4257f44a12c500b7f1720b6f3c44eb6b885a21e83bc3175cf4859939f
      labels:
        app.kubernetes.io/name: mariadb
        helm.sh/chart: mariadb-9.3.14
        app.kubernetes.io/instance: test
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: primary
    spec:

      serviceAccountName: test-mariadb
      affinity:
        podAffinity:

        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: mariadb
                    app.kubernetes.io/instance: test
                    app.kubernetes.io/component: primary
                namespaces:
                  - "helm-test"
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:

      securityContext:
        fsGroup: 1001
      containers:
        - name: mariadb
          image: docker.io/bitnami/mariadb:10.5.10-debian-10-r18
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsUser: 1001
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: MARIADB_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: test-mariadb
                  key: mariadb-root-password
            - name: MARIADB_USER
              value: "bn_wordpress"
            - name: MARIADB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: test-mariadb
                  key: mariadb-password
            - name: MARIADB_DATABASE
              value: "bitnami_wordpress"
          ports:
            - name: mysql
              containerPort: 3306
          livenessProbe:
            failureThreshold: 3
            initialDelaySeconds: 120
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
            exec:
              command:
                - /bin/bash
                - -ec
                - |
                  password_aux="${MARIADB_ROOT_PASSWORD:-}"
                  if [[ -f "${MARIADB_ROOT_PASSWORD_FILE:-}" ]]; then
                      password_aux=$(cat "$MARIADB_ROOT_PASSWORD_FILE")
                  fi
                  mysqladmin status -uroot -p"${password_aux}"
          readinessProbe:
            failureThreshold: 3
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
            exec:
              command:
                - /bin/bash
                - -ec
                - |
                  password_aux="${MARIADB_ROOT_PASSWORD:-}"
                  if [[ -f "${MARIADB_ROOT_PASSWORD_FILE:-}" ]]; then
                      password_aux=$(cat "$MARIADB_ROOT_PASSWORD_FILE")
                  fi
                  mysqladmin status -uroot -p"${password_aux}"
          resources:
            limits: {}
            requests: {}
          volumeMounts:
            - name: data
              mountPath: /bitnami/mariadb
            - name: config
              mountPath: /opt/bitnami/mariadb/conf/my.cnf
              subPath: my.cnf
      volumes:
        - name: config
          configMap:
            name: test-mariadb
  volumeClaimTemplates:
    - metadata:
        name: data
        labels:
          app.kubernetes.io/name: mariadb
          app.kubernetes.io/instance: test
          app.kubernetes.io/component: primary
      spec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: "8Gi"

NOTES:
** Please be patient while the chart is being deployed **

Your WordPress site can be accessed through the following DNS name from within your cluster:

    test-wordpress.helm-test.svc.cluster.local (port 80)

To access your WordPress site from outside the cluster follow the steps below:

1. Get the WordPress URL by running these commands:

  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        Watch the status with: 'kubectl get svc --namespace helm-test -w test-wordpress'

   export SERVICE_IP=$(kubectl get svc --namespace helm-test test-wordpress --template "{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}")
   echo "WordPress URL: http://$SERVICE_IP/"
   echo "WordPress Admin URL: http://$SERVICE_IP/admin"

2. Open a browser and access WordPress using the obtained URL.

3. Login with the following credentials below to see your blog:

  echo Username: user
  echo Password: $(kubectl get secret --namespace helm-test test-wordpress -o jsonpath="{.data.wordpress-password}" | base64 --decode)

Enabling storage

 microk8s enable storage

Enabling default storage class
[sudo] password for mars:

deployment.apps/hostpath-provisioner created
storageclass.storage.k8s.io/microk8s-hostpath created
serviceaccount/microk8s-hostpath created
clusterrole.rbac.authorization.k8s.io/microk8s-hostpath created
clusterrolebinding.rbac.authorization.k8s.io/microk8s-hostpath created
Storage will be available soon

Installed Lens by following "Using Lens, the Kubernetes IDE, with MicroK8s"

sudo snap install kontena-lens --classic

Launching kontena-lens

kontena-lens 

info: 📟 Setting Lens as protocol client for lens://
info: 📟 failed ❗
info: 🚀 Starting Lens from "/home/mars/snap/kontena-lens/179/.config/Lens"
info: 🐚 Syncing shell environment
info: 💾 Loading stores
STORE MIGRATION (/home/mars/snap/kontena-lens/179/.config/Lens/lens-cluster-store.json): 2.0.0-beta.2
STORE MIGRATION (/home/mars/snap/kontena-lens/179/.config/Lens/lens-cluster-store.json): 2.4.1
STORE MIGRATION (/home/mars/snap/kontena-lens/179/.config/Lens/lens-cluster-store.json): 2.6.0-beta.2
STORE MIGRATION (/home/mars/snap/kontena-lens/179/.config/Lens/lens-cluster-store.json): 2.6.0-beta.3
STORE MIGRATION (/home/mars/snap/kontena-lens/179/.config/Lens/lens-cluster-store.json): 2.7.0-beta.0
STORE MIGRATION (/home/mars/snap/kontena-lens/179/.config/Lens/lens-cluster-store.json): 2.7.0-beta.1
STORE MIGRATION (/home/mars/snap/kontena-lens/179/.config/Lens/lens-cluster-store.json): 3.6.0-beta.1
STORE MIGRATION (/home/mars/snap/kontena-lens/179/.config/Lens/lens-cluster-store.json): 4.2.2

Migrating embedded kubeconfig paths
info: [STORE]: LOADED from /home/mars/snap/kontena-lens/179/.config/Lens/lens-cluster-store.json
info: [STORE]: LOADED from /home/mars/snap/kontena-lens/179/.config/Lens/lens-extensions.json
info: [STORE]: LOADED from /home/mars/snap/kontena-lens/179/.config/Lens/lens-filesystem-provisioner-store.json
STORE MIGRATION (/home/mars/snap/kontena-lens/179/.config/Lens/lens-workspace-store.json): 4.2.0-beta.1
info: [STORE]: LOADED from /home/mars/snap/kontena-lens/179/.config/Lens/lens-workspace-store.json
STORE MIGRATION (/home/mars/snap/kontena-lens/179/.config/Lens/lens-user-store.json): 2.1.0-beta.4
info: [STORE]: LOADED from /home/mars/snap/kontena-lens/179/.config/Lens/lens-user-store.json
info: 🔑 Getting free port for LensProxy server
info: 🔌 Starting LensProxy
info: [LENS-PROXY]: Proxy server has started at http://localhost:45293
info: 🔎 Testing LensProxy connection ...
error: ENOENT: no such file or directory, open '/home/mars/.kube/config' {"errno":-2,"code":"ENOENT","syscall":"open","path":"/home/mars/.kube/config"}
info: ⚡ LensProxy connection OK
info: 🖥️  Starting WindowManager
info: 🧩 Initializing extensions
info: [EXTENSION-DISCOVERY] loading extensions from /home/mars/snap/kontena-lens/179/.config/Lens

(kontena-lens:1373066): libappindicator-WARNING **: 08:48:20.134: Using '/tmp' paths in SNAP environment will lead to unreadable resources
info: [EXTENSION-INSTALLER] installing dependencies at /home/mars/snap/kontena-lens/179/.config/Lens
info: [WINDOW-MANAGER]: Loading Main window from url: http://localhost:45293 ...
info: [EXTENSION-INSTALLER] dependencies installed at /home/mars/snap/kontena-lens/179/.config/Lens
info: [EXTENSION-DISCOVERY] watching extension add/remove in /home/mars/.k8slens/extensions
info: [EXTENSION]: enabled lens-license@0.1.0
info: [STORE]: LOADED from /home/mars/snap/kontena-lens/179/.config/Lens/extension-store/lens-survey/preferences-store.json
info: [EXTENSION]: enabled lens-survey@0.1.0
telemetry main extension activated
info: [STORE]: LOADED from /home/mars/snap/kontena-lens/179/.config/Lens/extension-store/lens-telemetry/preferences-store.json
info: [EXTENSION]: enabled lens-telemetry@0.1.0
info: [WINDOW-MANAGER]: Main window loaded
info: 📡 Checking for app updates
info: Checking for update
error: Error: Error: ENOENT: no such file or directory, open '/snap/kontena-lens/179/resources/app-update.yml'
error: [UPDATE-CHECKER]: failed with an error {"error":"Error: ENOENT: no such file or directory, open '/snap/kontena-lens/179/resources/app-update.yml'"}

Trying TensorFlow Lite on a Raspberry Pi 4

Installed by following the steps on this site:

https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/Raspberry_Pi_Guide.md

Example of running detection on camera video:

When the aircraft appears small in the frame (a distant target), it is often misrecognized as a kite or a bird. Please view at reduced playback speed.

Overview of the steps

  • 1a. Update the Raspberry Pi
  • 1b. Download the package from the repository and create a virtual environment
  • 1c. Install TensorFlow, OpenCV, and the other required libraries
  • 1d. Set up the TensorFlow Lite detection model
  • 1e. Run the TensorFlow Lite model

Step 1a. Update the Raspberry Pi

sudo apt-get update
sudo apt-get dist-upgrade

Step 1b. Download the repository and create a virtual environment

$ git clone https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi.git

The directory name is long, so rename it to something shorter
$ mv TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi tflite1
$ cd tflite1

Build a virtual environment using virtualenv:
$ sudo pip3 install virtualenv
Create the virtual environment "tflite1-env" with:
$ python3 -m venv tflite1-env

Activate "tflite1-env":
$ source tflite1-env/bin/activate

Step 1c. Install TensorFlow Lite dependencies and OpenCV

$ bash get_pi_requirements.sh
Pick the version that matches your environment from the following URL and install it:
https://github.com/google-coral/pycoral/releases/
For example, for Python 3.8 on 64-bit ARM:
$ pip3 install https://github.com/google-coral/pycoral/releases/download/v1.0.1/tflite_runtime-2.5.0-cp38-cp38-linux_aarch64.whl

Step 1d. Set up TensorFlow Lite detection model

Building a detection model from scratch is a lot of work, so here we download and borrow Google's sample.

$ wget https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip
$ unzip coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip -d Sample_TFLite_model

Step 1e. Run the TensorFlow Lite model

Python scripts are provided for still images, video files, camera input, and so on; as a first test, I ran it on the bundled test.mp4 video.

$ python3 TFLite_detection_video.py --modeldir=Sample_TFLite_model

To use a different video, specify the file name (path) with the --video option:
$ python3 TFLite_detection_video.py --video <video file> --modeldir=Sample_TFLite_model

Trying it on a video of the Gifu Air Festival taken from YouTube, detection was better than expected (with aircraft occasionally misrecognized as kites or birds).

To use a web camera:

$ python3 TFLite_detection_webcam.py --modeldir=Sample_TFLite_model

Google's pretrained model covers several dozen object classes. The class list is in the Sample_TFLite_model directory in a file named labelmap.txt. The same directory also contains the file detect.tflite, which appears to be the trained model data (binary).
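
As a quick sanity check, the label list and the model's tensor layout can be inspected from Python. This is a small sketch of my own (it assumes tflite_runtime is installed and uses the paths from the steps above):

# Inspect the sample model and its label map.
from tflite_runtime.interpreter import Interpreter

with open('Sample_TFLite_model/labelmap.txt') as f:
    labels = [line.strip() for line in f]
print(len(labels), 'classes, e.g.', labels[:5])

interpreter = Interpreter(model_path='Sample_TFLite_model/detect.tflite')
interpreter.allocate_tensors()
print('input shape:', interpreter.get_input_details()[0]['shape'])
print('number of output tensors:', len(interpreter.get_output_details()))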

By slightly modifying the TFLite_detection_video.py script, I was also able to extract the coordinates of the detection box only when an aircraft is detected, so I would like to combine this with my earlier experiments driving the camera with stepper and servo motors.
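
The core of that modification is just a class filter inside the per-frame detection loop. The function below is my own standalone rewrite of the logic that also appears in the full script further down:

# Return the centers of 'airplane' bounding boxes above a confidence threshold.
# boxes/classes/scores are the three output tensors of the SSD model,
# labels is the list read from labelmap.txt, and imW/imH is the frame size.
def aircraft_centers(boxes, classes, scores, labels, imW, imH, thresh=0.5):
    centers = []
    for i in range(len(scores)):
        if labels[int(classes[i])] == 'airplane' and thresh < scores[i] <= 1.0:
            ymin = int(max(1, boxes[i][0] * imH))
            xmin = int(max(1, boxes[i][1] * imW))
            ymax = int(min(imH, boxes[i][2] * imH))
            xmax = int(min(imW, boxes[i][3] * imW))
            centers.append(((xmin + xmax) // 2, (ymin + ymax) // 2))
    return centers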

Update: code for tracking with servo motors

import time
import math
import datetime
import cv2
import pigpio
import queue
import numpy as np
import sys
from threading import Thread
import importlib.util
import os

face_cascade_path = '/home/pi/opencv/data/haarcascades/haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(face_cascade_path)
usleep = lambda x: time.sleep(x/1000000.0)

TILT=17    # GPIO pin for the tilt servo
PAN=27     # GPIO pin for the pan servo
RPi=False
GP=pigpio.pi('localhost',8880)   # connect to pigpiod at localhost:8880
GP.set_mode(PAN,pigpio.OUTPUT)
GP.set_mode(TILT,pigpio.OUTPUT)

# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - Adrian Rosebrock, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
    """Camera object that controls video streaming from the Picamera"""
    def __init__(self,resolution=(640,480),framerate=30):
        # Initialize the PiCamera and the camera image stream
        self.stream = cv2.VideoCapture(0)
        #self.stream = cv2.VideoCapture('rtsp://admin:@192.168.68.128:554/1/h264major')
        ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        ret = self.stream.set(3,resolution[0])
        ret = self.stream.set(4,resolution[1])

        # Read first frame from the stream
        (self.grabbed, self.frame) = self.stream.read()

        # Variable to control when the camera is stopped
        self.stopped = False

    def start(self):
        # Start the thread that reads frames from the video stream
        Thread(target=self.update,args=()).start()
        return self

    def update(self):
        # Keep looping indefinitely until the thread is stopped
        while True:
            # If the camera is stopped, stop the thread
            if self.stopped:
                # Close camera resources
                self.stream.release()
                return

            # Otherwise, grab the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        # Return the most recent frame
        return self.frame

    def stop(self):
        # Indicate that the camera and thread should be stopped
        self.stopped = True
        
def move(p0,p1,dev):
    global tPos,pPos
    global tMin,tMax,pMin,pMax
    if dev==PAN:
        if p1 > pMax or p1 < pMin:
            return
    else:
        if p1 > tMax or p1 < tMin:
            return
            
    deg=p0
    dx=0.4
    counts=int(abs(p1-p0)/dx)
    if p1<p0:
        dx=-dx
    for i in range(0,counts):
        deg=deg+dx
        pw=500+int(deg*2000/270)   # map 0-270 deg to a 500-2500 us servo pulse
        GP.set_servo_pulsewidth(dev,pw)
        #time.sleep(0.005)
        #GP.set_servo_pulsewidth(dev,0)
        if dev==TILT:   
            tPos=deg
        else:
            pPos=deg

def key(k):
    global pPos,tPos,PAN,TILT,track,f_all
    global capture,fontFace,color,Green,Red

    if k == ord('j'):
        new=pPos+2
        move(pPos,new,PAN)
        return
    elif k == ord('k'):
        new=pPos-2
        move(pPos,new,PAN)
        return
    elif k == ord('m'):
        new=tPos-2
        move(tPos,new,TILT)
        return
    elif k == ord('i'):
        new=tPos+2
        move(tPos,new,TILT)
        return
    elif k == ord('p'):
        tmp=input()
        move(pPos,int(tmp),PAN)
    elif k == ord('t'):
        tmp=input()
        move(tPos,int(tmp),TILT)
    elif k == ord('a'):
        f_all = not f_all
    elif k == ord('f'):
        track = not(track)
        if  track:
            color=Red
        else:
            color=Green
 
    elif k == ord('z'):
        move(tPos,0,TILT)
        move(pPos,90,PAN)

def tracking(dX,dY):
    global xW,yW,pPos,tPos,tW
    ret=False
    if dX >0 :
        move(pPos,pPos+1,PAN)
    elif dX < 0:
        move(pPos,pPos-1,PAN)
    if dY > 0:
        move(tPos,tPos+1,TILT)
    elif dY < 0:
        move(tPos,tPos-1,TILT)
    return ret

# moving-object detection (frame differencing against a running average)
def detectMOV(tm, tc):
    global avg,  img1,frame
    ret = False
    x,y=0,0
    if avg is None:
        avg = img1.copy().astype("float")
    else:
        cv2.accumulateWeighted(img1, avg, 0.5)
        frameDelta = cv2.absdiff(img1, cv2.convertScaleAbs(avg))
        thresh = cv2.threshold(frameDelta, tm,  255, cv2.THRESH_BINARY)[1]
        #cv2.imshow('th',thresh)
        contours,hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    #    contours=cv2.drawContours(img,contours,-1,(0,255,0),2)
        for i in range(0,len(contours)):
            if len(contours[i]) > 0:
                 if cv2.contourArea(contours[i]) > tc:
                    rect = contours[i]
                    x, y, w, h = cv2.boundingRect(rect)
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    ret=True
                        
    return ret,x,y

   
def detect_face(frame,gray):
    global xW,yW,xC,yC
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    xC,yC=xW,yW
    for x, y, w, h in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        face = frame[y: y + h, x: x + w]
        try:
            xC,yC=x+w/2,y+h/2
            dX,dY=xC-xW,yC-yW
            if track:
                if(abs(dX)>tW or (abs(dY)>tW)):
                    tracking(dX,dY)
        except:
            xC,yC=xW,yW    
        
if __name__ == "__main__":

    avg=None
    tc=350     # minimum contour area for motion detection
    tm=10      # threshold value for THRESH_BINARY
    before = None
    tPos,pPos=0,0
    track=False
    fontFace =cv2.FONT_HERSHEY_SIMPLEX
    Red=(0,0,255)
    Blue=(255,0,0)
    Green=(0,255,0)
    TGT=['airplane','bird','kite']
    #capture = cv2.VideoCapture(1)
    #
    tMin,tMax=0,90  # minimum/Maximum setting for TILT 
    pMin,pMax=0,180 # minimum/Maximum setting for PAN
    move(tPos,0,TILT)
    move(pPos,0,PAN)
    wMax=50
    f_count=wMax

    f_all=True
    MODEL_NAME = 'Sample_TFLite_model'
    GRAPH_NAME = 'detect.tflite'
    LABELMAP_NAME = 'labelmap.txt'
    min_conf_threshold = 0.5
    #resW, resH =1280,720
    resW, resH =640,480
    imW, imH = int(resW), int(resH)
    use_TPU = False
    size=(resW, resH)
    Cx=int(resW/2)
    Cy=int(resH/2)
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
    pkg = importlib.util.find_spec('tflite_runtime')
    if pkg:
        from tflite_runtime.interpreter import Interpreter
        if use_TPU:
            from tflite_runtime.interpreter import load_delegate
    else:
        from tensorflow.lite.python.interpreter import Interpreter
        if use_TPU:
            from tensorflow.lite.python.interpreter import load_delegate

    # If using Edge TPU, assign filename for Edge TPU model
    if use_TPU:
        # If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
        if (GRAPH_NAME == 'detect.tflite'):
            GRAPH_NAME = 'edgetpu.tflite'

    # Get path to current working directory
    CWD_PATH = os.getcwd()

    # Path to .tflite file, which contains the model that is used for object detection
    PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)

    # Path to label map file
    PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)

    # Load the label map
    with open(PATH_TO_LABELS, 'r') as f:
        labels = [line.strip() for line in f.readlines()]

    # Have to do a weird fix for label map if using the COCO "starter model" from
    # https://www.tensorflow.org/lite/models/object_detection/overview
    # First label is '???', which has to be removed.
    if labels[0] == '???':
        del(labels[0])

    # Load the Tensorflow Lite model.
    # If using Edge TPU, use special load_delegate argument
    if use_TPU:
        interpreter = Interpreter(model_path=PATH_TO_CKPT,
                                  experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
        print(PATH_TO_CKPT)
    else:
        interpreter = Interpreter(model_path=PATH_TO_CKPT)

    interpreter.allocate_tensors()

    # Get model details
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]
    W,H = width,height
    xW,yW =int( W/2),int(H/2)
    tW=W/80         # minimum offcenter distance

    floating_model = (input_details[0]['dtype'] == np.float32)

    input_mean = 127.5
    input_std = 127.5
    move(tPos,20,TILT)
    move(pPos,120,PAN)
    # Initialize frame rate calculation
    frame_rate_calc = 1
    freq = cv2.getTickFrequency()

    # Initialize video stream
    videostream = VideoStream(resolution=(imW,imH),framerate=30).start()
    time.sleep(1)
    frame_rate = 24.0 # frame rate
    now=datetime.datetime.now().strftime("%Y%m%d_%H%M")
    fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') # container/codec (mp4 here)
    writer = cv2.VideoWriter('SV_'+now+'.mp4', fmt, frame_rate, size) # create the writer
    frames=0
    #for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
    while True:
        now=datetime.datetime.now().strftime("%Y%m%d_%H:%M:%S")
        # Start timer (for calculating frame rate)
        t1 = cv2.getTickCount()

        # Grab frame from video stream
        frame1 = videostream.read()

        # Acquire frame and resize to expected shape [1xHxWx3]
        frame = frame1.copy()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb, (width, height))
        input_data = np.expand_dims(frame_resized, axis=0)

        # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
        if floating_model:
            input_data = (np.float32(input_data) - input_mean) / input_std

        # Perform the actual detection by running the model with the image as input
        interpreter.set_tensor(input_details[0]['index'],input_data)
        interpreter.invoke()

        # Retrieve detection results
        boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
        classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
        scores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects
        #num = interpreter.get_tensor(output_details[3]['index'])[0]  # Total number of detected objects (inaccurate and not needed)
        # Draw framerate in corner of frame
        msg='FPS: {0:.2f}'.format(frame_rate_calc)
        msg = msg + ' Track:'+str(track)+ ' F:' + str(frames) + ' T:'+ str(f_all) + ' ' + now
        cv2.putText(frame,msg,(30,50),cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,255,0),1,cv2.LINE_AA)
        # Loop over all detections and draw detection box if confidence is above minimum threshold
        for i in range(len(scores)):
            object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
            if f_all or (object_name in TGT):
                if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
                    # Get bounding box coordinates and draw box
                    # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
                    ymin = int(max(1,(boxes[i][0] * imH)))
                    xmin = int(max(1,(boxes[i][1] * imW)))
                    ymax = int(min(imH,(boxes[i][2] * imH)))
                    xmax = int(min(imW,(boxes[i][3] * imW)))
                    x,y=Cx,Cy
                    if (xmax-xmin)*(ymax-ymin)<10000:
                        x=xmin+int((xmax-xmin)*0.5)
                        y=ymin+int((ymax-ymin)*0.5)
                        if f_all:
                            # Draw label
                            label = '%s: %d%%' % (object_name, int(scores[i]*100))
                            labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
                            label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
                            cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
                            cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
                            cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
                        else:
                            x=xmin+int((xmax-xmin)*0.5)
                            y=ymin+int((ymax-ymin)*0.5)
                            cv2.circle(frame,(x,y),4,color=Green,thickness=1)
                            cv2.circle(frame,(x,y),10,color=Green,thickness=1)
                            cv2.circle(frame,(x,y),16,color=Green,thickness=1)
                            frames=frames+1
                            writer.write(frame)
                            f_count=wMax
                        dW = Cx - x
                        dH = Cy - y
                        msg='dW:'+str(dW)+' dH:'+ str(dH)
                        cv2.putText(frame, msg, (30, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 1) 
                        #msg=tgt_track(track,dH,dW,tH,tW,ptz,moverequest)
                        msg=tracking(dW,dH)

        # All the results have been drawn on the frame, so it's time to display it.
        cv2.imshow('Object detector', frame)
        if f_count>0 and f_count !=wMax:
            if not f_all:
                writer.write(frame)
        f_count=f_count-1
        # Calculate framerate
        t2 = cv2.getTickCount()
        time1 = (t2-t1)/freq
        frame_rate_calc= 1/time1
        # Press 'q' to quit
        k=cv2.waitKey(1) & 0xFF
        key(k)       
        if k == ord('q'):
            break
        for i in range(5):
            frame1 = videostream.read()
    # Clean up
    cv2.destroyAllWindows()
    videostream.stop()
    if writer is not None:
        writer.release()
    move(tPos,10,TILT)
    move(pPos,90,PAN)
    GP.stop()
    print('Finish!')

Displaying images from an 8x8 thermal imager (AMG8833) and attaching a pulse oximeter to an M5Stack. Distance to the target is also needed for temperature correction, so three I2C devices are connected (work in progress).

Building a thermal imager with the AMG8833.

From left: Diymore MAX30102 heart-rate sensor module, 8x8 thermal imager (AMG8833), and VL53L0X (ToF) laser ranging sensor.
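
To confirm that all three devices are actually visible on the bus, a quick scan (my own Python stand-in for i2cdetect) can be run first:

# Scan I2C bus 1 and list the addresses that respond;
# each of the three sensors should appear at its own address.
from smbus import SMBus

bus = SMBus(1)
found = []
for addr in range(0x03, 0x78):
    try:
        bus.read_byte(addr)
        found.append(hex(addr))
    except IOError:
        pass
print('devices found:', found)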

The 8x8 sensor output is rendered smoothly using bicubic interpolation.
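
For reference, the smoothing itself is a single OpenCV call. A minimal sketch, with random data standing in for the AMG8833 read:

# Upscale an 8x8 thermal frame with bicubic interpolation and show it
# as a color map (random data is used here in place of the sensor read).
import cv2
import numpy as np

pixels = np.random.uniform(20.0, 35.0, (8, 8)).astype(np.float32)  # stand-in frame
img = cv2.resize(pixels, (320, 320), interpolation=cv2.INTER_CUBIC)
img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
cv2.imshow('thermal', cv2.applyColorMap(img, cv2.COLORMAP_JET))
cv2.waitKey(0)
cv2.destroyAllWindows()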