想い出ざわざわ

Studying image processing and machine learning.

SSD: on receptive fields

"""Keras implementation of SSD."""

import keras.backend as K
from keras.layers import Activation
from keras.layers import AtrousConvolution2D
from keras.layers import Convolution2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import GlobalAveragePooling2D
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.layers import concatenate
from keras.layers import Reshape
from keras.layers import ZeroPadding2D
from keras.models import Model

from ssd_layers import Normalize
from ssd_layers import PriorBox


def SSD300(input_shape, num_classes=21):
"""SSD300 architecture.

# Arguments
input_shape: Shape of the input image,
expected to be either (300, 300, 3) or (3, 300, 300)(not tested).
num_classes: Number of classes including background.

# References
"""
    net = {}
    # Block 1
    input_tensor = Input(shape=input_shape)
    img_size = (input_shape[1], input_shape[0])
    net['input'] = input_tensor
    net['conv1_1'] = Convolution2D(64, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv1_1')(net['input'])
    net['conv1_2'] = Convolution2D(64, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv1_2')(net['conv1_1'])
    net['pool1'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same',
                                name='pool1')(net['conv1_2'])
    # Block 2
    net['conv2_1'] = Convolution2D(128, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv2_1')(net['pool1'])
    net['conv2_2'] = Convolution2D(128, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv2_2')(net['conv2_1'])
    net['pool2'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same',
                                name='pool2')(net['conv2_2'])
    # Block 3
    net['conv3_1'] = Convolution2D(256, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv3_1')(net['pool2'])
    net['conv3_2'] = Convolution2D(256, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv3_2')(net['conv3_1'])
    net['conv3_3'] = Convolution2D(256, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv3_3')(net['conv3_2'])
    net['pool3'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same',
                                name='pool3')(net['conv3_3'])
    # Block 4
    net['conv4_1'] = Convolution2D(512, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv4_1')(net['pool3'])
    net['conv4_2'] = Convolution2D(512, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv4_2')(net['conv4_1'])
    net['conv4_3'] = Convolution2D(512, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv4_3')(net['conv4_2'])
    net['pool4'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same',
                                name='pool4')(net['conv4_3'])
    # Block 5
    net['conv5_1'] = Convolution2D(512, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv5_1')(net['pool4'])
    net['conv5_2'] = Convolution2D(512, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv5_2')(net['conv5_1'])
    net['conv5_3'] = Convolution2D(512, 3, 3,
                                   activation='relu',
                                   border_mode='same',
                                   name='conv5_3')(net['conv5_2'])
    net['pool5'] = MaxPooling2D((3, 3), strides=(1, 1), border_mode='same',
                                name='pool5')(net['conv5_3'])
    # FC6
    net['fc6'] = AtrousConvolution2D(1024, 3, 3, atrous_rate=(6, 6),
                                     activation='relu', border_mode='same',
                                     name='fc6')(net['pool5'])
    # x = Dropout(0.5, name='drop6')(x)
    # FC7
    net['fc7'] = Convolution2D(1024, 1, 1, activation='relu',
                               border_mode='same', name='fc7')(net['fc6'])
    # x = Dropout(0.5, name='drop7')(x)
    # Block 6
    net['conv6_1'] = Convolution2D(256, 1, 1, activation='relu',
                                   border_mode='same',
                                   name='conv6_1')(net['fc7'])
    net['conv6_2'] = Convolution2D(512, 3, 3, subsample=(2, 2),
                                   activation='relu', border_mode='same',
                                   name='conv6_2')(net['conv6_1'])
    # Block 7
    net['conv7_1'] = Convolution2D(128, 1, 1, activation='relu',
                                   border_mode='same',
                                   name='conv7_1')(net['conv6_2'])
    net['conv7_2'] = ZeroPadding2D()(net['conv7_1'])
    net['conv7_2'] = Convolution2D(256, 3, 3, subsample=(2, 2),
                                   activation='relu', border_mode='valid',
                                   name='conv7_2')(net['conv7_2'])
    # Block 8
    net['conv8_1'] = Convolution2D(128, 1, 1, activation='relu',
                                   border_mode='same',
                                   name='conv8_1')(net['conv7_2'])
    net['conv8_2'] = Convolution2D(256, 3, 3, subsample=(2, 2),
                                   activation='relu', border_mode='same',
                                   name='conv8_2')(net['conv8_1'])
    # Last Pool
    net['pool6'] = GlobalAveragePooling2D(name='pool6')(net['conv8_2'])
    # Prediction from conv4_3
    net['conv4_3_norm'] = Normalize(20, name='conv4_3_norm')(net['conv4_3'])
    num_priors = 3
    x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
                      name='conv4_3_norm_mbox_loc')(net['conv4_3_norm'])
    net['conv4_3_norm_mbox_loc'] = x
    flatten = Flatten(name='conv4_3_norm_mbox_loc_flat')
    net['conv4_3_norm_mbox_loc_flat'] = flatten(net['conv4_3_norm_mbox_loc'])
    name = 'conv4_3_norm_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
                      name=name)(net['conv4_3_norm'])
    net['conv4_3_norm_mbox_conf'] = x
    flatten = Flatten(name='conv4_3_norm_mbox_conf_flat')
    net['conv4_3_norm_mbox_conf_flat'] = flatten(net['conv4_3_norm_mbox_conf'])
    priorbox = PriorBox(img_size, 30.0, aspect_ratios=[2],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='conv4_3_norm_mbox_priorbox')
    net['conv4_3_norm_mbox_priorbox'] = priorbox(net['conv4_3_norm'])
    # Prediction from fc7
    num_priors = 6
    net['fc7_mbox_loc'] = Convolution2D(num_priors * 4, 3, 3,
                                        border_mode='same',
                                        name='fc7_mbox_loc')(net['fc7'])
    flatten = Flatten(name='fc7_mbox_loc_flat')
    net['fc7_mbox_loc_flat'] = flatten(net['fc7_mbox_loc'])
    name = 'fc7_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    net['fc7_mbox_conf'] = Convolution2D(num_priors * num_classes, 3, 3,
                                         border_mode='same',
                                         name=name)(net['fc7'])
    flatten = Flatten(name='fc7_mbox_conf_flat')
    net['fc7_mbox_conf_flat'] = flatten(net['fc7_mbox_conf'])
    priorbox = PriorBox(img_size, 60.0, max_size=114.0, aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='fc7_mbox_priorbox')
    net['fc7_mbox_priorbox'] = priorbox(net['fc7'])
    # Prediction from conv6_2
    num_priors = 6
    x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
                      name='conv6_2_mbox_loc')(net['conv6_2'])
    net['conv6_2_mbox_loc'] = x
    flatten = Flatten(name='conv6_2_mbox_loc_flat')
    net['conv6_2_mbox_loc_flat'] = flatten(net['conv6_2_mbox_loc'])
    name = 'conv6_2_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
                      name=name)(net['conv6_2'])
    net['conv6_2_mbox_conf'] = x
    flatten = Flatten(name='conv6_2_mbox_conf_flat')
    net['conv6_2_mbox_conf_flat'] = flatten(net['conv6_2_mbox_conf'])
    priorbox = PriorBox(img_size, 114.0, max_size=168.0, aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='conv6_2_mbox_priorbox')
    net['conv6_2_mbox_priorbox'] = priorbox(net['conv6_2'])
    # Prediction from conv7_2
    num_priors = 6
    x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
                      name='conv7_2_mbox_loc')(net['conv7_2'])
    net['conv7_2_mbox_loc'] = x
    flatten = Flatten(name='conv7_2_mbox_loc_flat')
    net['conv7_2_mbox_loc_flat'] = flatten(net['conv7_2_mbox_loc'])
    name = 'conv7_2_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
                      name=name)(net['conv7_2'])
    net['conv7_2_mbox_conf'] = x
    flatten = Flatten(name='conv7_2_mbox_conf_flat')
    net['conv7_2_mbox_conf_flat'] = flatten(net['conv7_2_mbox_conf'])
    priorbox = PriorBox(img_size, 168.0, max_size=222.0, aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='conv7_2_mbox_priorbox')
    net['conv7_2_mbox_priorbox'] = priorbox(net['conv7_2'])
    # Prediction from conv8_2
    num_priors = 6
    x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
                      name='conv8_2_mbox_loc')(net['conv8_2'])
    net['conv8_2_mbox_loc'] = x
    flatten = Flatten(name='conv8_2_mbox_loc_flat')
    net['conv8_2_mbox_loc_flat'] = flatten(net['conv8_2_mbox_loc'])
    name = 'conv8_2_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
                      name=name)(net['conv8_2'])
    net['conv8_2_mbox_conf'] = x
    flatten = Flatten(name='conv8_2_mbox_conf_flat')
    net['conv8_2_mbox_conf_flat'] = flatten(net['conv8_2_mbox_conf'])
    priorbox = PriorBox(img_size, 222.0, max_size=276.0, aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='conv8_2_mbox_priorbox')
    net['conv8_2_mbox_priorbox'] = priorbox(net['conv8_2'])
    # Prediction from pool6
    num_priors = 6
    x = Dense(num_priors * 4, name='pool6_mbox_loc_flat')(net['pool6'])
    net['pool6_mbox_loc_flat'] = x
    name = 'pool6_mbox_conf_flat'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Dense(num_priors * num_classes, name=name)(net['pool6'])
    net['pool6_mbox_conf_flat'] = x
    priorbox = PriorBox(img_size, 276.0, max_size=330.0, aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='pool6_mbox_priorbox')
    if K.image_dim_ordering() == 'tf':
        target_shape = (1, 1, 256)
    else:
        target_shape = (256, 1, 1)
    net['pool6_reshaped'] = Reshape(target_shape,
                                    name='pool6_reshaped')(net['pool6'])
    net['pool6_mbox_priorbox'] = priorbox(net['pool6_reshaped'])
    # Gather all predictions
    net['mbox_loc'] = concatenate([net['conv4_3_norm_mbox_loc_flat'],
                                   net['fc7_mbox_loc_flat'],
                                   net['conv6_2_mbox_loc_flat'],
                                   net['conv7_2_mbox_loc_flat'],
                                   net['conv8_2_mbox_loc_flat'],
                                   net['pool6_mbox_loc_flat']],
                                  axis=1, name='mbox_loc')
    net['mbox_conf'] = concatenate([net['conv4_3_norm_mbox_conf_flat'],
                                    net['fc7_mbox_conf_flat'],
                                    net['conv6_2_mbox_conf_flat'],
                                    net['conv7_2_mbox_conf_flat'],
                                    net['conv8_2_mbox_conf_flat'],
                                    net['pool6_mbox_conf_flat']],
                                   axis=1, name='mbox_conf')
    net['mbox_priorbox'] = concatenate([net['conv4_3_norm_mbox_priorbox'],
                                        net['fc7_mbox_priorbox'],
                                        net['conv6_2_mbox_priorbox'],
                                        net['conv7_2_mbox_priorbox'],
                                        net['conv8_2_mbox_priorbox'],
                                        net['pool6_mbox_priorbox']],
                                       axis=1,
                                       name='mbox_priorbox')
    if hasattr(net['mbox_loc'], '_keras_shape'):
        num_boxes = net['mbox_loc']._keras_shape[-1] // 4
    else:
        # fall back to the backend shape if _keras_shape is unavailable
        num_boxes = K.int_shape(net['mbox_loc'])[-1] // 4
    net['mbox_loc'] = Reshape((num_boxes, 4),
                              name='mbox_loc_final')(net['mbox_loc'])
    net['mbox_conf'] = Reshape((num_boxes, num_classes),
                               name='mbox_conf_logits')(net['mbox_conf'])
    net['mbox_conf'] = Activation('softmax',
                                  name='mbox_conf_final')(net['mbox_conf'])
    net['predictions'] = concatenate([net['mbox_loc'],
                                      net['mbox_conf'],
                                      net['mbox_priorbox']],
                                     axis=2,
                                     name='predictions')
    model = Model(net['input'], net['predictions'])
    return model
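
For reference, a minimal usage sketch of the function above, assuming the code is saved as ssd.py next to ssd_layers.py (which provides Normalize and PriorBox) and a Keras version that still accepts the legacy Convolution2D/border_mode arguments; the weights file named in the comment is hypothetical.

if __name__ == '__main__':
    # Build the 300x300 model with the default 21 VOC-style classes
    # (TensorFlow dimension ordering: channels last).
    model = SSD300((300, 300, 3), num_classes=21)
    # model.load_weights('weights_SSD300.hdf5', by_name=True)  # hypothetical checkpoint
    model.summary()  # lists every layer, including the mbox loc/conf/priorbox heads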
 

Goal

Increase the IoU between GT (ground-truth boxes) and PB (prior boxes) to improve recall.

 

Assumptions (which GTs are in scope)

GTs shifted only in the x direction

GTs shifted in both the x and y directions

⇨ Since we want to detect arbitrary GTs, assume the latter.

 

Approach (how to increase the IoU)

Make the PBs larger ・・・①

Add more PBs at shifted positions ・・・②

・Use ① and ② in combination

 

Trade-offs

Merits of ①

・Processing time hardly increases (the number of PBs hardly grows)

Demerits of ①

When the GT center is at the worst-case position, the IoU is smaller than with ②

 (because the PB itself is large)

Worst-case position: one of the four corners of a feature-map cell (a numeric sketch of this case follows this list)

Merits of ②

When the GT center is at the worst-case position, the IoU is larger than with ①

Demerits of ②

・Processing time tends to increase (because there are more PBs)

Risk shared by ① and ②

The receptive field each PB needs becomes wider, with the risk that the features can no longer be captured.
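
To make the trade-off above concrete, the rough IoU calculation below compares ① and ② at the worst-case position. It is only a sketch: the stride of 8 px (roughly conv4_3 on a 300x300 input), the 1.3 enlargement factor, and the assumption that the GT is a square of the same size as the base PB are my own illustrative choices, not values taken from the notes or the model.

# Rough numeric sketch of the worst-case IoU discussed above (illustrative values only).
# Boxes are (xmin, ymin, xmax, ymax) in input pixels.

def iou(a, b):
    """IoU of two axis-aligned boxes."""
    ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    union = ((a[2] - a[0]) * (a[3] - a[1]) +
             (b[2] - b[0]) * (b[3] - b[1]) - inter)
    return inter / union

def box(cx, cy, size):
    """Square box of the given size centered at (cx, cy)."""
    h = size / 2.0
    return (cx - h, cy - h, cx + h, cy + h)

stride, pb_size = 8.0, 30.0                     # assumed: conv4_3-like stride, PriorBox scale 30
gt = box(stride / 2.0, stride / 2.0, pb_size)   # GT centered on a feature-map cell corner

pb_base     = box(0.0, 0.0, pb_size)                    # current PB on the cell center
pb_enlarged = box(0.0, 0.0, pb_size * 1.3)              # option ①: bigger PB, same count
pb_offset   = box(stride / 2.0, stride / 2.0, pb_size)  # option ②: extra PB at the corner

print('current PB  :', round(iou(gt, pb_base), 3))      # ~0.60
print('① larger PB :', round(iou(gt, pb_enlarged), 3))  # ~0.59, barely moves here
print('② offset PB :', round(iou(gt, pb_offset), 3))    # 1.0, but roughly 4x as many PBs

With these particular numbers, enlarging the PB barely changes the worst-case IoU, while the offset PB matches the GT exactly at the cost of several times more prior boxes, which is exactly the trade-off listed above.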

 

・On receptive fields

http://joisino.hatenablog.com/entry/2017/07/13/210000
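
On that receptive-field point, the receptive field of each layer can be estimated with the standard recurrence r_out = r_in + (k_eff - 1) * j, where j is the accumulated stride of the layer's input grid and k_eff the dilation-adjusted kernel size. The sketch below is my own helper (not code from the linked article), with the layer list copied by hand from the VGG trunk of the SSD300 code above.

def receptive_field(layers):
    # r: receptive field in input pixels, j: accumulated stride (jump) of the grid.
    r, j = 1, 1
    for name, k, s, d in layers:
        k_eff = d * (k - 1) + 1          # dilation-adjusted kernel size
        r += (k_eff - 1) * j
        j *= s
        print('{:<8} r = {:>3}  stride = {:>2}'.format(name, r, j))

# (name, kernel, stride, dilation) for the VGG16 trunk of SSD300 up to fc6
vgg_to_fc6 = [
    ('conv1_1', 3, 1, 1), ('conv1_2', 3, 1, 1), ('pool1', 2, 2, 1),
    ('conv2_1', 3, 1, 1), ('conv2_2', 3, 1, 1), ('pool2', 2, 2, 1),
    ('conv3_1', 3, 1, 1), ('conv3_2', 3, 1, 1), ('conv3_3', 3, 1, 1), ('pool3', 2, 2, 1),
    ('conv4_1', 3, 1, 1), ('conv4_2', 3, 1, 1), ('conv4_3', 3, 1, 1), ('pool4', 2, 2, 1),
    ('conv5_1', 3, 1, 1), ('conv5_2', 3, 1, 1), ('conv5_3', 3, 1, 1), ('pool5', 3, 1, 1),
    ('fc6',     3, 1, 6),   # atrous_rate=(6, 6) gives an effective 13x13 kernel
]
receptive_field(vgg_to_fc6)

Run as-is this reports a receptive field of 92 px at conv4_3 (already about three times the 30 px prior boxes placed there) and 420 px at fc6, which gives a feel for how much margin is left before larger or denser prior boxes start to outgrow the features a layer can actually see.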

 

Training a CNN on EC2

https://aws.amazon.com/jp/about-aws/global-infrastructure/regional-product-services/  Comparison of services available in each region, as of 2019/1/4

 

http://nakano-tomofumi.hatenablog.com/entry/2018/04/05/094931  Recommendations on which AWS region to use

 

https://qiita.com/KoronHM/items/24d7f79ea6417ab78621

Machine learning with GPUs on AWS

 

https://www.shadan-kun.com/blog/measure/3410/

Essential security measures for AWS

 

https://qiita.com/noko_qii/items/d4c19ec5e891264af462

A look into security groups

 

https://qiita.com/hiroshik1985/items/f078a6a017d092a541cf

AWS from scratch: EC2

 

Preventing forgetting to delete AWS resources (7)

https://dev.classmethod.jp/cloud/aws/cost-check-point/

 

Why you cannot connect to an EC2 instance

http://otiai10.hatenablog.com/entry/2017/07/12/144645

With pyenv and anaconda installed side by side, activate does not work

Running activate by its full path solves it.

...I don't remember why I made them coexist in the first place...

 

Three workarounds for the activate conflict when pyenv and anaconda coexist

【VIM】Shortcut settings【Tabs】

Add the following settings to .vimrc.

" Anywhere SID.
function! s:SID_PREFIX()
  return matchstr(expand('<sfile>'), '<SNR>\d\+_\zeSID_PREFIX$')
endfunction

" Set tabline.
function! s:my_tabline()  "{{{
  let s = ''
  for i in range(1, tabpagenr('$'))
    let bufnrs = tabpagebuflist(i)
    let bufnr = bufnrs[tabpagewinnr(i) - 1]  " first window, first appears
    let no = i  " display the tab page number
    let mod = getbufvar(bufnr, '&modified') ? '!' : ' '
    let title = fnamemodify(bufname(bufnr), ':t')
    let title = '[' . title . ']'
    let s .= '%'.i.'T'
    let s .= '%#' . (i == tabpagenr() ? 'TabLineSel' : 'TabLine') . '#'
    let s .= no . ':' . title
    let s .= mod
    let s .= '%#TabLineFill# '
  endfor
  let s .= '%#TabLineFill#%T%=%#TabLine#'
  return s
endfunction "}}}
let &tabline = '%!'. s:SID_PREFIX() . 'my_tabline()'
set showtabline=2 " always show the tab line

" The prefix key.
nnoremap    [Tag]   <Nop>
nmap    t [Tag]
" Tab jump
for n in range(1, 9)
  execute 'nnoremap <silent> [Tag]'.n  ':<C-u>tabnext'.n.'<CR>'
endfor
" t1 で1番左のタブ、t2 で1番左から2番目のタブにジャンプ

map <silent> [Tag]c :tablast <bar> tabnew<CR>
" tc 新しいタブを一番右に作る
map <silent> [Tag]x :tabclose<CR>
" tx タブを閉じる
map <silent> [Tag]n :tabnext<CR>
" tn 次のタブ
map <silent> [Tag]p :tabprevious<CR>
" tp 前のタブ

 

With this,

  • t1, t2, ..., t9 jump to the n-th tab from the left
  • tc opens a new tab, tx closes the current tab

and you can work with tab pages efficiently.

 

It was so convenient that I reposted it here.

Give it a try yourself.
