A Programmer Runs a Neural Network in Python Late at Night, Just to Turn Off His Lamp with a Meme Pose
【Abstract】 Compiled by 啤酒泡泡 and 曹培信. Source: 大数据文摘 (ID: BigDataDigest). Original: MakeArtWithPython.
For anyone who never wants to get out of bed once they're in it, turning off the light is the biggest challenge standing between them and sleep! So one guy from Naples, Italy decided to control the lights in his home with "dance moves" (body poses), with the whole pipeline driven by a neural network.
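Step one is collecting training data. The script below assumes the webcam and the OpenPose Python wrapper have already been initialized (the full setup appears in the final script at the end of this post). It displays the camera feed with OpenPose's skeleton overlay and records the detected keypoints for whichever pose you are holding: press b to save a dab, m to save a T-pose, / to save anything else, and q to quit.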
dabs = []
tposes = []
other = []

fps_time = 0

# loop forever, reading webcam each time
while True:
    ret_val, frame = vs.read()
    datum.cvInputData = frame
    opWrapper.emplaceAndPop([datum])

    # need to be able to see what's going on
    image = datum.cvOutputData
    cv2.putText(image,
                "FPS: %f" % (1.0 / (time.time() - fps_time)),
                (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 255, 0), 2)
    cv2.imshow("Openpose", image)
    fps_time = time.time()

    # quit with a q keypress, b or m to save data
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
    elif key == ord("b"):
        print("Dab: " + str(datum.poseKeypoints))
        dabs.append(datum.poseKeypoints[0])
    elif key == ord("m"):
        print("TPose: " + str(datum.poseKeypoints))
        tposes.append(datum.poseKeypoints[0])
    elif key == ord("/"):
        print("Other: " + str(datum.poseKeypoints))
        other.append(datum.poseKeypoints[0])

# write our data as numpy binary files
# for analysis later
dabs = np.asarray(dabs)
tposes = np.asarray(tposes)
other = np.asarray(other)

np.save('dabs.npy', dabs)
np.save('tposes.npy', tposes)
np.save('other.npy', other)
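Next comes training the classifier. The lines that load the saved poses back in are missing from the excerpt; a minimal reconstruction, assuming the three .npy files written above and the label scheme the final script relies on (0 = other, 1 = dab, 2 = tpose), would look something like this:

import numpy as np

# assumed reconstruction: reload the recorded poses and stack them into one dataset
dabDataset = np.load('dabs.npy')
tposeDataset = np.load('tposes.npy')
otherDataset = np.load('other.npy')
dataset = np.concatenate((otherDataset, dabDataset, tposeDataset))

# 'other' examples get label 0; dab and tpose labels are appended next
labels = np.zeros(len(otherDataset))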
labels = np.append(labels, np.full((len(dabDataset)), 1))
labels = np.append(labels, np.full((len(tposeDataset)), 2))
print(labels)
print("%i total examples for training." % len(labels))
from sklearn.utils import shuffle
X1, y1 = shuffle(dataset, labels)
# now let's label them for 'one hot'
from keras.utils.np_utils import to_categorical
y1 = to_categorical(y1, 3) # we have 3 categories, dab, tpose, other
print(y1.shape[1])

X1[:,:,0] = X1[:,:,0] / 720  # I think the dimensions are 1280 x 720 ?
X1[:,:,1] = X1[:,:,1] / 1280 # let's see?
X1 = X1[:,:,1:] # note: this keeps (y, confidence) and drops the x coordinate, not the confidence
print(X1.shape)
X1 = X1.reshape(len(X1), 50) # 25 keypoints x 2 values each = 50 inputs
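The classifier itself is deliberately tiny: two fully connected layers of 128 ReLU units, each followed by 50% dropout, and a softmax over the three pose classes. With only 50 inputs per example there is very little to learn, so even 2000 epochs go by quickly.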
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.optimizers import SGD

model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(50,)))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(y1.shape[1], activation='softmax'))

model.compile(optimizer='Adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(X1, y1, epochs=2000, batch_size=25)

# save the trained model so it can be loaded by the scripts below
model.save('data/dab-tpose-other.h5')
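With the model trained and saved, a quick sanity check: load it back and run it on a held-out set of recorded dabs, applying exactly the same normalization and reshaping as at training time. Every example should come back as class 1.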
# in our other code, or inline, load the model and test against sample dab dataset
import keras
import numpy as np

modello = keras.models.load_model('data/dab-tpose-other.h5')

dabDataset = np.load('data/test-dabs.npy')
dabDataset[:,:,0] = dabDataset[:,:,0] / 720 # I think the dimensions are 1280 x 720 ?
dabDataset[:,:,1] = dabDataset[:,:,1] / 1280 # let's see?
dabDataset = dabDataset[:,:,1:]
dabDataset = dabDataset.reshape(len(dabDataset), 50)
modello.predict_classes(dabDataset) # returns array([1, 1, 1, 1, 1, 1])
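Finally, the script that ties it all together on the Jetson TX2: OpenPose pulls keypoints out of each camera frame, the Keras model classifies the pose, and python-openzwave flips the Z-Wave switches. A dab turns the lights off, a T-pose turns them on, and a three-second debounce keeps a held pose from retriggering.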
import cv2
import pyopenpose as op
from imutils import translate, rotate, resize

import openzwave
from openzwave.option import ZWaveOption
from openzwave.network import ZWaveNetwork
# make sure these commands get flushed by doing them first, then loading tensorflow...
# tensorflow should take enough time to start for these commands to flush
options = ZWaveOption('/dev/ttyACM0')
options.lock()
network = ZWaveNetwork(options)
import time
import numpy as np
np.random.seed(1337)
import tensorflow as tf
# make sure tensorflow doesn't take up all the gpu memory
conf = tf.ConfigProto()
conf.gpu_options.allow_growth=True
session = tf.Session(config=conf)
import keras
# Custom Params (refer to include/openpose/flags.hpp for more parameters)
params = dict()
params["model_folder"] = "../../models/"
# built in TX2 video capture source
vs = cv2.VideoCapture("nvarguscamerasrc ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)720,format=(string)NV12, framerate=(fraction)24/1 ! nvvidconv flip-method=0 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink")
tposer = keras.models.load_model('dab-tpose-other.h5')
# Starting OpenPose
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
datum = op.Datum()
np.set_printoptions(precision=4)
fps_time = 0
DAB = 1
TPOSE = 2
OTHER = 0
LIGHTS = 0
bounced = time.time()
debounce = 3 # wait 3 seconds before allowing another command
while True:
    ret_val, frame = vs.read()
    datum.cvInputData = frame
    opWrapper.emplaceAndPop([datum])

    # need to be able to see what's going on
    image = datum.cvOutputData
    cv2.putText(image,
                "FPS: %f" % (1.0 / (time.time() - fps_time)),
                (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 255, 0), 2)
    cv2.imshow("Openpose", image)

    if datum.poseKeypoints.any():
        first_input = datum.poseKeypoints
        try:
            # same normalization and reshaping as at training time
            first_input[:,:,0] = first_input[:,:,0] / 720
            first_input[:,:,1] = first_input[:,:,1] / 1280
            first_input = first_input[:,:,1:]
            first_input = first_input.reshape(len(datum.poseKeypoints), 50)
        except:
            continue

        output = tposer.predict_classes(first_input)

        for j in output:
            if j == DAB:
                print("dab detected")
                if LIGHTS == 0 or (time.time() - bounced) < debounce:
                    continue
                for node in network.nodes:
                    for val in network.nodes[node].get_switches():
                        network.nodes[node].set_switch(val, False)
                LIGHTS = 0
                bounced = time.time()
            elif j == TPOSE:
                print("tpose detected")
                if LIGHTS == 1 or (time.time() - bounced) < debounce:
                    continue
                for node in network.nodes:
                    for val in network.nodes[node].get_switches():
                        network.nodes[node].set_switch(val, True)
                LIGHTS = 1
                bounced = time.time()

    fps_time = time.time()

    # quit with a q keypress
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

# clean up after yourself
vs.release()
cv2.destroyAllWindows()