A memo.
There also seem to be plenty more here:
GitHub – tensorflow/models: Models built with TensorFlow
This one might be exactly what I'm looking for:
https://www.deepdetect.com/applications/model/
I plan to organize these when I have time.
Fashion datasets:
https://sites.google.com/site/fashionparsing/dataset
Clothing-parsing research from Tohoku University:
http://vision.is.tohoku.ac.jp/~kyamagu/ja/research/clothing_parsing/
What is this one?
https://github.com/applebym/project5_final
Fashion item search (with code?):
http://gigazine.net/news/20161027-fashion-snap-detection-retrieval/
Hmm... maybe I was better off not finding this. Isn't it exactly the same idea?
https://github.com/rivukhoda/claridrobe
Hm? It looks like the API can be called from JS, so the whole thing could run in the browser?
https://developer.clarifai.com/quick-start/
The client ID and secret would have to go into a permission-restricted JS file so they can't be read.
http://kivantium.hateblo.jp/entry/2015/11/18/233834
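That said, file permissions can't hide anything the browser actually downloads, so if the keys must stay secret the safer route is to keep them on a server and proxy the call from there. A minimal Python sketch, assuming the 2016-era Clarifai v1 REST endpoints (the token/tag URLs and field names here are assumptions; check the quick-start above for the real ones):

# Server-side Clarifai call so the client ID/secret never reach the browser.
# Endpoint paths and field names are assumptions based on the v1 REST API.
import requests

CLIENT_ID = 'your-client-id'          # keep these out of client-side JS
CLIENT_SECRET = 'your-client-secret'

# Exchange the credentials for an OAuth2 access token.
token = requests.post(
    'https://api.clarifai.com/v1/token/',
    data={'grant_type': 'client_credentials',
          'client_id': CLIENT_ID,
          'client_secret': CLIENT_SECRET},
).json()['access_token']

# Tag an image by URL and print the predicted labels.
resp = requests.post(
    'https://api.clarifai.com/v1/tag/',
    headers={'Authorization': 'Bearer ' + token},
    data={'url': 'https://example.com/photo.jpg'},
)
print(resp.json())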
To make a character face forward after reaching its goal, a script with the structure below is needed.
Note: making it face forward before the goal produces a "Kin-chan run" (a comical stiff upright run), so the upper-body bones would probably need a mask (e.g. an avatar mask on a separate Animator layer) and separate control.
using UnityEngine;
using System.Collections;

public class heading : MonoBehaviour {
    NavMeshAgent myNav;                     // NavMeshAgent on this character
    bool arrived = false;                   // set once the goal is reached
    public bool customHeading = false;      // use a per-character heading?
    public Vector3 customVector = Vector3.forward;   // heading when customHeading is on
    Vector3 defaultVector = Vector3.forward;         // default heading otherwise

    // Use this for initialization
    void Start () {
        myNav = GetComponent<NavMeshAgent> ();
    }

    // Update is called once per frame
    void Update () {
        // Get NavMesh info every frame; when this agent reaches its goal,
        // call the heading function once.
        if (!arrived && !myNav.pathPending &&
            myNav.remainingDistance <= myNav.stoppingDistance) {
            arrived = true;
            HeadingCall ();
        }
    }

    // New function: heading call.
    // If a custom heading is set, face the custom vector;
    // otherwise face the default vector.
    void HeadingCall () {
        Vector3 dir = customHeading ? customVector : defaultVector;
        transform.rotation = Quaternion.LookRotation (dir);
    }
}
Work in progress:
using UnityEngine;
using System.Collections;
using System.Collections.Generic;
using UnityStandardAssets.Characters.ThirdPerson;

public class myscript : MonoBehaviour {
    public GameObject myInstance;   // prefab to spawn
    public GameObject myGoal;       // goal marker prefab
    public int sakusei = 10;        // how many characters to spawn
    public GameObject targetbj;     // GameObject the characters target (unused here)
    Transform targetPos;            // transform of the assigned goal
    NavMeshAgent myNav;             // NavMeshAgent of each spawned character
    List<Vector3> myPoint = new List<Vector3>();  // list of goal positions

    // Use this for initialization
    void Start () {
        myPoint = new List<Vector3>();                   // initialize the list
        myPoint.Add (new Vector3 (0.0f, 0.5f, -5.0f));   // add goal positions
        myPoint.Add (new Vector3 (-20f, 0.5f, 14f));
        myPoint.Add (new Vector3 (20f, 0.5f, 14f));

        // Create the three goal objects (for testing).
        GameObject goalObj = Instantiate (myGoal, myPoint [0], Quaternion.identity) as GameObject;
        goalObj.name = "goal1";
        goalObj = Instantiate (myGoal, myPoint [1], Quaternion.identity) as GameObject;
        goalObj.name = "goal2";
        goalObj = Instantiate (myGoal, myPoint [2], Quaternion.identity) as GameObject;
        goalObj.name = "goal3";

        // Spawn n characters and hand each one a goal in round-robin order.
        for (int i = 0; i < sakusei; i++) {
            GameObject go = Instantiate (myInstance, new Vector3 (i + 1.0f, 0, 0), Quaternion.identity) as GameObject;
            string myAIname = "AI" + i.ToString ();
            go.name = myAIname;
            int divideInt = i % 3;
            if (divideInt == 0) {
                GameObject my1 = GameObject.Find ("goal1");
                targetPos = my1.GetComponent<Transform> ();
            } else if (divideInt == 1) {
                GameObject my2 = GameObject.Find ("goal2");
                targetPos = my2.GetComponent<Transform> ();
            } else if (divideInt == 2) {
                GameObject my3 = GameObject.Find ("goal3");
                targetPos = my3.GetComponent<Transform> ();
            }
            myNav = go.GetComponent<NavMeshAgent> ();  // NavMeshAgent of the character just spawned
            myNav.SetDestination (targetPos.position); // set the goal position
            myNav.stoppingDistance = 3.0f;             // offset from the goal point; without this they jostle around it
            AICharacterControl myTar = go.GetComponent<AICharacterControl> ();
            myTar.target = targetPos;                  // set the target the ThirdPerson AI walks to
        }
    }

    // Not used.
    void Update () {
    }
}
AI 10 mm = 1 Maya unit (10 mm) = 0.01 Unity units (Unity's unit is 1 m, so 10 mm).
Setting Scale Factor to 100 after importing into Unity turns that into 1 m.
So to build a 10 m × 10 m building,
draw the plan at 1/100 scale (10 cm × 10 cm) and scale it up 100× when Unity reads it in.
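A quick sanity check of that unit chain (just a sketch; it assumes 1 Maya unit = 10 mm and 1 Unity unit = 1 m, as above):

# Verify the plan-to-building scale: a 10 cm drawing times the x100
# import Scale Factor should come out as 10 Unity units, i.e. 10 m.
unity_unit_mm = 1000.0   # 1 Unity unit = 1 m
scale_factor = 100       # Scale Factor applied on Unity import
drawing_mm = 100.0       # one wall of the 10 cm x 10 cm plan

imported_units = drawing_mm * scale_factor / unity_unit_mm
print(imported_units)    # 10.0 Unity units -> a 10 m wall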
OpenCV wasn't installed, so I installed OpenCV for Python 2.7 following
http://qiita.com/suppy193/items/91609e75789e9f458c39
and then followed
http://arkouji.cocolog-nifty.com/blog/2016/08/tensorflowraspb.html
but classify_image.py wasn't there (there's no models directory at all), so, looking at the TensorFlow source, I created it by hand:
nano classify_image.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple image classification with Inception.
Run image classification with Inception trained on ImageNet 2012 Challenge data
set.
This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.
Change the --image_file argument to any jpg image to compute a
classification of that image.
Please see the tutorial and website for a detailed description of how
to use this script to perform image recognition.
https://tensorflow.org/tutorials/image_recognition/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
FLAGS = None
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
class NodeLookup(object):
"""Converts integer node ID's to human readable labels."""
def __init__(self,
label_lookup_path=None,
uid_lookup_path=None):
if not label_lookup_path:
label_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
uid_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
"""
if not tf.gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not tf.gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
def create_graph():
"""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def run_inference_on_image(image):
"""Runs inference on an image.
Args:
image: Image file name.
Returns:
Nothing
"""
if not tf.gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = tf.gfile.FastGFile(image, 'rb').read()
# Creates graph from saved GraphDef.
create_graph()
with tf.Session() as sess:
# Some useful tensors:
# 'softmax:0': A tensor containing the normalized prediction across
# 1000 labels.
# 'pool_3:0': A tensor containing the next-to-last layer containing 2048
# float description of the image.
# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
# encoding of the image.
# Runs the softmax tensor by feeding the image_data as input to the graph.
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = NodeLookup()
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
def maybe_download_and_extract():
"""Download and extract model tar file."""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def main(_):
maybe_download_and_extract()
image = (FLAGS.image_file if FLAGS.image_file else
os.path.join(FLAGS.model_dir, 'cropped_panda.jpg'))
run_inference_on_image(image)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/imagenet',
help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--image_file',
type=str,
default='',
help='Absolute path to image file.'
)
parser.add_argument(
'--num_top_predictions',
type=int,
default=5,
help='Display this many predictions.'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
Then run:
python classify_image.py
and you get:
giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca (score = 0.89107)
indri, indris, Indri indri, Indri brevicaudatus (score = 0.00779)
lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens (score = 0.00296)
custard apple (score = 0.00147)
earthstar (score = 0.00117)
So it outputs: giant panda 89%, indri (a primate) 0.8%, red panda 0.3%, custard apple (a tropical fruit) 0.15%, and earthstar (a fungus) 0.12%.
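To try it on your own photo, point the script at any JPEG via the --image_file flag defined above (the path here is just a placeholder):
python classify_image.py --image_file /path/to/some.jpg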
References for building my own:
http://arkouji.cocolog-nifty.com/blog/2016/08/tensorflow-76e9.html
This site, plus the reference sites it links to.
http://qiita.com/khayate/items/bb7c61f447b4c579ddd1
An easy-to-follow explanation.
Making it a web app
Installing all the pieces:
http://qiita.com/PonDad/items/9fbdf4d694f825dd1b6e
That looked like a hassle, so I'll try TensorFlow, which has been getting a lot of buzz lately.
Install instructions here:
http://tech.mof-mof.co.jp/blog/tensorflow-tutorial.html
Reference:
https://github.com/samjabrahams/tensorflow-on-raspberry-pi
Update: the install seems to have succeeded.
Check that it works with this:
http://qiita.com/mix_dvd/items/6b38859148a988c3fe06
I got an error, so I ran
sudo pip install --upgrade html5lib==1.0b8
then hit another error, so I ran
sudo pip install -U pandas
After that,
python mnist_softmax.py
gave
Extracting /tmp/tensorflow/mnist/input_data/t10k-labels-idx1-ubyte.gz 0.9193
so it's probably working (that trailing 0.9193 is the test accuracy, about 92%).
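For reference, the gist of what mnist_softmax.py does: a single softmax-regression layer trained with SGD, which lands at roughly 92% test accuracy. A minimal sketch following the classic tutorial (written against the 2016-era TF 0.x API, hence initialize_all_variables):

# Minimal MNIST softmax regression, per the classic TensorFlow tutorial.
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)

# Model: one linear layer followed by softmax.
x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 images
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot labels

# Cross-entropy loss, minimized with plain gradient descent.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session()
sess.run(tf.initialize_all_variables())
for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

# Accuracy on the test set -- this is where the ~0.92 comes from.
correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))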
-- Extras --
Apparently this can even be done in the browser, or something:
http://qiita.com/payashim/items/d4fe5227b21a5215e78b
I also considered Chainer, but figured I'd start with whatever looks easy.
Deep Belief SDK