From 1f372942a9a69195233cc49a37bada0d241cb8ae Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Thu, 9 Jul 2015 11:57:05 +0200 Subject: [PATCH 01/79] Initial version of Fast-RCNN. Work in progress --- FRCNN.lua | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++ ROIPooling.lua | 51 ++++++++++++++++++++++++++++++++++++++ data.lua | 2 ++ opts.lua | 1 + test.lua | 13 ++++++++++ 5 files changed, 134 insertions(+) create mode 100644 FRCNN.lua create mode 100644 ROIPooling.lua create mode 100644 test.lua diff --git a/FRCNN.lua b/FRCNN.lua new file mode 100644 index 0000000..8ab8527 --- /dev/null +++ b/FRCNN.lua @@ -0,0 +1,67 @@ +local FRCNN = torch.class('nnf.FRCNN') + +function FRCNN:__init(dataset) + self.dataset = dataset + + self.scale = {600} + self.max_dim = 1000 + self.randomscale = true + + --self.sz_conv_standard = 13 + self.step_standard = 16 + --self.offset0 = 21 + --self.offset = 6.5 + + --self.inputArea = 224^2 + +end + +local function rgb2bgr(I) + local out = I.new():resizeAs(I) + for i=1,I:size(1) do + out[i] = I[I:size(1)+1-i] + end + return out +end + +local function prepareImage(I,typ) + local typ = typ or 1 + local mean_pix = typ == 1 and {128.,128.,128.} or {103.939, 116.779, 123.68} + local I = I + if I:dim() == 2 then + I = I:view(1,I:size(1),I:size(2)) + end + if I:size(1) == 1 then + I = I:expand(3,I:size(2),I:size(3)) + end + I = rgb2bgr(I):mul(255) + for i=1,3 do + I[i]:add(-mean_pix[i]) + end + return I +end + +function FRCNN:getScale(I) + local min_size = math.min(I[2],I[3]) + local max_size = math.max(I[2],I[3]) + local scale + if max_size <= self.max_dim then + scale = self.scale[1]/min_size + else + scale = self.max_dim/max_size + end + return scale +end + +function FRCNN:projectBBoxes(bboxes,scale) + return (bboxes-1)*scale+1 +end + +function FRCNN:getFeatures(i,flip) + local I = self.dataset:getImage(i) + local bboxes = self.dataset:attachProposals(i) + I = prepareImage(I) + if flip then + + end +end diff --git a/ROIPooling.lua b/ROIPooling.lua new file mode 100644 index 0000000..23a5606 --- /dev/null +++ b/ROIPooling.lua @@ -0,0 +1,51 @@ +local ROIPooling,parent = torch.class('nnf.ROIPooling','nn.Module') + +function ROIPooling:__init(W,H) + parent.__init(self) + self.W = W + self.H = H + self.pooler = {}--nn.SpatialAdaptiveMaxPooling(W,H) +end + +-- not for batches for the moment +function ROIPooling:updateOutput(input) + local data = input[1] + local rois = input[2] + local num_rois = rois:size(1) + local s = data:size() + local ss = s:size(1) + self.output:resize(num_rois,s[ss-2],self.H,self.W) + + if #self.pooler < num_rois then + local diff = num_rois - #self.pooler + for i=1,diff do + table.insert(self.pooler,nn.SpatialAdaptiveMaxPooling(self.W,self.H)) + end + end + + for i=1,num_rois do + local roi = rois[i] + local im = data[{{},{roi[2],roi[4]},{roi[1],roi[3]}}] + self.output[i] = self.pooler[i]:forward(im) + end + return self.output +end + +function ROIPooling:updateGradInput(input,gradOutput) + local data = input[1] + local rois = input[2] + local num_rois = rois:size(1) + local s = data:size() + local ss = s:size(1) + self.gradInput:resizeAs(data):zero() + + for i=1,num_rois do + local roi = rois[i] + local r = {{},{roi[2],roi[3]},{roi[1],roi[3]}} + local im = data[r] + local g = self.pooler[i]:backward(im,gradOutput[i]) + self.gradInput[r]:add(g) + end + return self.gradInput + +end diff --git a/data.lua b/data.lua index 655deb5..f1cb9d5 100644 --- a/data.lua +++ b/data.lua @@ -51,6 +51,7 @@ else batch_provider = nnf.BatchProvider(feat_provider) 
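
-- Sketch (not part of the patches): how patch 01's scale policy behaves.
-- FRCNN:getScale picks self.scale[1]/min_side (shorter side -> 600) unless the
-- longer side already exceeds self.max_dim, in which case it fits the longer
-- side to 1000; FRCNN:projectBBoxes then maps 1-based boxes with
-- (bboxes-1)*scale+1. Assuming I:size() is the CxHxW size of the loaded image:
--
--   local scale = frcnn:getScale(I:size())   -- e.g. H=375,W=500 -> 600/375 = 1.6
--   -- longer side 500 <= max_dim, so the 1000 cap does not apply
--   local proj = frcnn:projectBBoxes(torch.Tensor{{1,1,101,101}}, scale)
--   -- proj = {{1,1,161,161}}: (101-1)*1.6+1 = 161, and pixel 1 stays anchored at 1
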
batch_provider.iter_per_batch = opt.ipb batch_provider.nTimesMoreData = opt.ntmd + batch_provider.batch_size = opt.batch_size batch_provider.fg_fraction = opt.fg_frac batch_provider.bg_threshold = {0.0,0.5} batch_provider.do_flip = true @@ -89,6 +90,7 @@ else batch_provider_test = nnf.BatchProvider(feat_provider_test) batch_provider_test.iter_per_batch = 500--opt.ipb batch_provider_test.nTimesMoreData = 10--opt.ntmd + batch_provider_test.batch_size = opt.batch_size batch_provider_test.fg_fraction = opt.fg_frac batch_provider_test.bg_threshold = {0.0,0.5} batch_provider_test.do_flip = false diff --git a/opts.lua b/opts.lua index f07d8dc..3665874 100644 --- a/opts.lua +++ b/opts.lua @@ -36,6 +36,7 @@ function M.parse(arg) cmd:option('-nsmooth',40,'number of iterations before reducing learning rate') cmd:option('-nred',4,'number of divisions by 2 before stopping learning') cmd:option('-nildfdx',false,'erase memory of gradients when reducing learning rate') + cmd:option('-batch_size',128,'batch size') cmd:text() cmd:text('Others') cmd:option('-gpu',1,'gpu device to use') diff --git a/test.lua b/test.lua new file mode 100644 index 0000000..6057a07 --- /dev/null +++ b/test.lua @@ -0,0 +1,13 @@ +require 'nn' +nnf = {} +dofile 'ROIPooling.lua' + +m = nnf.ROIPooling(3,3) + +t = {torch.rand(1,10,10),torch.Tensor({{1,1,5,5},{2,3,7,8},{6,4,8,8},{6,4,10,10},{8,8,10,10}})} -- +g = torch.rand(t[2]:size(1),1,3,3) + +o = m:forward(t) +gg = m:backward(t,g) + + From 344a9bbd5f55468266681a803c87b3e26f18e8fe Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Fri, 7 Aug 2015 09:36:11 +0200 Subject: [PATCH 02/79] Add ROIDataLayer, still incomplete --- ROIDataLayer.lua | 62 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 ROIDataLayer.lua diff --git a/ROIDataLayer.lua b/ROIDataLayer.lua new file mode 100644 index 0000000..c13a22c --- /dev/null +++ b/ROIDataLayer.lua @@ -0,0 +1,62 @@ +local ROIDataLayer,parent = torch.class('nnf.ROIDataLayer','nnf.BatchProvider') + +function ROIDataLayer:__init(dataset) + parent.__init(self) + self.dataset = dataset + self.image_transformer + self.imgs_per_batch = 2 + self.scale = 600 + self.max_size = 1000 +end + +local function shuffle_roidb_inds(self) + self._perm = torch.randperm(self.dataset:size()) + self._curr = 0 +end + +local function get_next_minibatch_inds(self) + if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb) then + self:shuffle_roidb_inds() + end + + local db_inds = self._perm[{{self._cur,self._cur + self.imgs_per_batch}}] + self._cur = self._cur + self.imgs_per_batch + return db_inds +end + + +function ROIDataLayer:getBatch() + local dataset = self.dataset + local img_ids = self:get_next_minibatch_inds() + + local num_images = img_ids:size(1) + local imgs = {} + local im_sizes = {} + -- get images + -- prep_im_for_blob + for i=1,num_images do + local im = dataset:getImage(img_ids[i]) + im = self.image_transformer:preprocess(im) + local im_size = im[1]:size() + local im_size_min = math.min(im_size[1],im_size[2]) + local im_size_max = math.max(im_size[1],im_size[2]) + local im_scale = self.scale/im_size_min + if torch.round(im_scale*im_size_max) > self.max_size then + im_scale = self.max_size/im_size_max + end + local im_s = {im_size[1]*im_scale,im_size[2]*im_scale} + table.insert(imgs,image.scale(im,im_s[2],im_s[1])) + table.insert(im_sizes,im_s) + end + -- create single tensor with all images, padding with zero for different sizes + im_sizes = torch.IntTensor(im_sizes) + local max_shape = 
im_sizes:max(1)
+  local images = torch.FloatTensor(num_images,3,max_shape[1],max_shape[2])
+  for i=1,num_images do
+    images[i][{{1,imgs[i]:size(2)},{imgs[i]:size(3)}}]:copy(imgs[i])
+  end
+
+  return images
+end
+
+

From c5d456c19f6fa7f5cbfd2ef94d51f44ed4cf07a2 Mon Sep 17 00:00:00 2001
From: Francisco Massa
Date: Thu, 13 Aug 2015 11:03:26 +0200
Subject: [PATCH 03/79] Committing what we have for the moment

---
 FRCNN.lua        | 25 -------------------------
 ROIDataLayer.lua | 27 +++++++++++++++++++++++++++
 2 files changed, 27 insertions(+), 25 deletions(-)

diff --git a/FRCNN.lua b/FRCNN.lua
index 8ab8527..6fd8061 100644
--- a/FRCNN.lua
+++ b/FRCNN.lua
@@ -16,31 +16,6 @@ function FRCNN:__init(dataset)
 
 end
 
-local function rgb2bgr(I)
-  local out = I.new():resizeAs(I)
-  for i=1,I:size(1) do
-    out[i] = I[I:size(1)+1-i]
-  end
-  return out
-end
-
-local function prepareImage(I,typ)
-  local typ = typ or 1
-  local mean_pix = typ == 1 and {128.,128.,128.} or {103.939, 116.779, 123.68}
-  local I = I
-  if I:dim() == 2 then
-    I = I:view(1,I:size(1),I:size(2))
-  end
-  if I:size(1) == 1 then
-    I = I:expand(3,I:size(2),I:size(3))
-  end
-  I = rgb2bgr(I):mul(255)
-  for i=1,3 do
-    I[i]:add(-mean_pix[i])
-  end
-  return I
-end
-
 function FRCNN:getScale(I)
   local min_size = math.min(I[2],I[3])
   local max_size = math.max(I[2],I[3])
diff --git a/ROIDataLayer.lua b/ROIDataLayer.lua
index c13a22c..9ba9eef 100644
--- a/ROIDataLayer.lua
+++ b/ROIDataLayer.lua
@@ -32,6 +32,7 @@ function ROIDataLayer:getBatch()
   local num_images = img_ids:size(1)
   local imgs = {}
   local im_sizes = {}
+  local im_scales = {}
   -- get images
   -- prep_im_for_blob
   for i=1,num_images do
@@ -47,6 +48,7 @@ function ROIDataLayer:getBatch()
     local im_s = {im_size[1]*im_scale,im_size[2]*im_scale}
     table.insert(imgs,image.scale(im,im_s[2],im_s[1]))
     table.insert(im_sizes,im_s)
+    table.insert(im_scales,im_scale)
   end
   -- create single tensor with all images, padding with zero for different sizes
   im_sizes = torch.IntTensor(im_sizes)
@@ -56,7 +58,32 @@ function ROIDataLayer:getBatch()
     images[i][{{1,imgs[i]:size(2)},{imgs[i]:size(3)}}]:copy(imgs[i])
   end
 
+
   return images
 end
 
+local function sample_rois(self,i)
+
+  local dataset = self.dataset
+  local rec = dataset:attachProposals(i)
+  local fg_inds = {}
+  local bg_inds = {}
+  for j=1,rec:size() do
+    local id = rec.label[j]
+    local is_fg = (rec.overlap[j] >= self.fg_threshold)
+    local is_bg = (rec.overlap[j] >= self.bg_threshold[1] and
+                   rec.overlap[j] < self.bg_threshold[2])
+    if is_fg then
+      table.insert(fg_inds,j)
+    elseif is_bg then
+      table.insert(bg_inds,j)
+    end
+  end
+
+  local fg_rois_per_this_image = math.min(#fg_inds,self.fg_)
+  if #fg_inds > 0 then
+
+  end
+
+end
 

From ea4dcd3c00906eccbf6ec9a38e395b277a9a147c Mon Sep 17 00:00:00 2001
From: Francisco Massa
Date: Thu, 13 Aug 2015 17:22:52 +0200
Subject: [PATCH 04/79] Updating BatchProviderROI

---
 BatchProviderROI.lua | 141 +++++++++++++++++++++++++++++++++++++++++++
 ROIDataLayer.lua     |  89 ---------------------------
 nnf.lua              |   5 +-
 3 files changed, 144 insertions(+), 91 deletions(-)
 create mode 100644 BatchProviderROI.lua
 delete mode 100644 ROIDataLayer.lua

diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua
new file mode 100644
index 0000000..6f28865
--- /dev/null
+++ b/BatchProviderROI.lua
@@ -0,0 +1,141 @@
+local BatchProviderROI, parent = torch.class('nnf.BatchProviderROI','nnf.BatchProvider')
+
+function BatchProviderROI:__init(dataset)
+  local fp = {dataset=dataset}
+  parent:__init(fp)
+  self.imgs_per_batch = 2
+  self.scale = 600
+  self.max_size = 1000
+ 
self.image_transformer = nnf.ImageTransformer{} +end + +-- setup is the same + +function BatchProviderROI:permuteIdx() + local fg_num_each = self.fg_num_each + local bg_num_each = self.bg_num_each + local fg_num_total = self.fg_num_total + local bg_num_total = self.bg_num_total + local total_img = self.dataset:size() + local imgs_per_batch = self.imgs_per_batch + + self._cur = self._cur or math.huge + + if self._cur + imgs_per_batch > total_img then + self._perm = torch.randperm(total_img) + self._cur = 1 + end + + local img_idx = self._perm[{{self._cur,self._cur + self.imgs_per_batch - 1}}] + self._cur = self._cur + self.imgs_per_batch + + local img_idx_end = imgs_per_batch + --[[ + local fg_windows = {} + local bg_windows = {} + for i=1,img_idx_end do + local curr_idx = img_idx[i] + bg_windows[i] = {} + if self.bboxes[curr_idx][0] then + for j=1,self.bboxes[curr_idx][0]:size(1) do + table.insert(bg_windows[i],{curr_idx,j}) + end + end + fg_windows[i] = {} + if self.bboxes[curr_idx][1] then + for j=1,self.bboxes[curr_idx][1]:size(1) do + table.insert(fg_windows[i],{curr_idx,j}) + end + end + end + --]] + local opts = {img_idx=img_idx,img_idx_end=img_idx_end} + return fg_windows,bg_windows,opts + +end + +function BatchProviderROI:selectBBoxes(fg_windows,bg_windows) + local fg_w = {} + local bg_w = {} + + for im=1,self.imgs_per_batch do + + fg_w[im] = {} + bg_w[im] = {} + + local window_idx = torch.randperm(#bg_windows[im]) + for i=1,math.min(self.bg_num_each,#bg_windows[im]) do + local curr_idx = bg_windows[im][window_idx[i] ][1] + local position = bg_windows[im][window_idx[i] ][2] + local dd = self.bboxes[curr_idx][0][position] + table.insert(bg_w[im],dd) + end + + window_idx = torch.randperm(#fg_windows[im]) + for i=1,math.min(self.fg_num_each,#fg_windows[im]) do + local curr_idx = fg_windows[im][window_idx[i] ][1] + local position = fg_windows[im][window_idx[i] ][2] + local dd = self.bboxes[curr_idx][1][position] + table.insert(fg_w[im],dd) + end + + end + + return fg_w,bg_w +end + +local function getImages(self,img_ids,images) + local dataset = self.dataset + local num_images = img_ids:size(1) + + local imgs = {} + local im_sizes = {} + local im_scales = {} + + for i=1,num_images do + local im = dataset:getImage(img_ids[i]) + im = self.image_transformer:preprocess(im) + local im_size = im[1]:size() + local im_size_min = math.min(im_size[1],im_size[2]) + local im_size_max = math.max(im_size[1],im_size[2]) + local im_scale = self.scale/im_size_min + if torch.round(im_scale*im_size_max) > self.max_size then + im_scale = self.max_size/im_size_max + end + local im_s = {im_size[1]*im_scale,im_size[2]*im_scale} + table.insert(imgs,image.scale(im,im_s[2],im_s[1])) + table.insert(im_sizes,im_s) + table.insert(im_scales,im_scale) + end + -- create single tensor with all images, padding with zero for different sizes + im_sizes = torch.IntTensor(im_sizes) + local max_shape = im_sizes:max(1)[1] + images:resize(num_images,3,max_shape[1],max_shape[2]) + for i=1,num_images do + images[i][{{},{1,imgs[i]:size(2)},{1,imgs[i]:size(3)}}]:copy(imgs[i]) + end + return im_scales +end + + +function BatchProviderROI:getBatch(batches,targets) + local dataset = self.dataset + + self.fg_num_each = self.fg_fraction * self.batch_size + self.bg_num_each = self.batch_size - self.fg_num_each + --self.fg_num_total = self.fg_num_each * self.iter_per_batch + --self.bg_num_total = self.bg_num_each * self.iter_per_batch + + local fg_windows,bg_windows,opts = self:permuteIdx() + --local fg_w,bg_w = 
self:selectBBoxes(fg_windows,bg_windows) + + local batches = batches or {torch.FloatTensor(),torch.FloatTensor()} + local targets = targets or torch.IntTensor() + + -- batches[1]:resize(self.batch_size,unpack(self.batch_dim)) + local im_scales = getImages(self,opts.img_idx,batches[1]) + batches[2]:resize(self.batch_size,unpack(self.batch_dim)) + targets:resize(self.batch_size,self.target_dim) + + return batches, targets +end diff --git a/ROIDataLayer.lua b/ROIDataLayer.lua deleted file mode 100644 index 9ba9eef..0000000 --- a/ROIDataLayer.lua +++ /dev/null @@ -1,89 +0,0 @@ -local ROIDataLayer,parent = torch.class('nnf.ROIDataLayer','nnf.BatchProvider') - -function ROIDataLayer:__init(dataset) - parent.__init(self) - self.dataset = dataset - self.image_transformer - self.imgs_per_batch = 2 - self.scale = 600 - self.max_size = 1000 -end - -local function shuffle_roidb_inds(self) - self._perm = torch.randperm(self.dataset:size()) - self._curr = 0 -end - -local function get_next_minibatch_inds(self) - if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb) then - self:shuffle_roidb_inds() - end - - local db_inds = self._perm[{{self._cur,self._cur + self.imgs_per_batch}}] - self._cur = self._cur + self.imgs_per_batch - return db_inds -end - - -function ROIDataLayer:getBatch() - local dataset = self.dataset - local img_ids = self:get_next_minibatch_inds() - - local num_images = img_ids:size(1) - local imgs = {} - local im_sizes = {} - local im_scales = {} - -- get images - -- prep_im_for_blob - for i=1,num_images do - local im = dataset:getImage(img_ids[i]) - im = self.image_transformer:preprocess(im) - local im_size = im[1]:size() - local im_size_min = math.min(im_size[1],im_size[2]) - local im_size_max = math.max(im_size[1],im_size[2]) - local im_scale = self.scale/im_size_min - if torch.round(im_scale*im_size_max) > self.max_size then - im_scale = self.max_size/im_size_max - end - local im_s = {im_size[1]*im_scale,im_size[2]*im_scale} - table.insert(imgs,image.scale(im,im_s[2],im_s[1])) - table.insert(im_sizes,im_s) - table.insert(im_scales,im_scale) - end - -- create single tensor with all images, padding with zero for different sizes - im_sizes = torch.IntTensor(im_sizes) - local max_shape = im_sizes:max(1) - local images = torch.FloatTensor(num_images,3,max_shape[1],max_shape[2]) - for i=1,num_images do - images[i][{{1,imgs[i]:size(2)},{imgs[i]:size(3)}}]:copy(imgs[i]) - end - - - return images -end - - -local function sample_rois(self,i) - - local dataset = self.dataset - local rec = dataset:attachProposals(i) - local fg_inds = {} - local bg_inds = {} - for j=1,rec:size() do - local id = rec.label[j] - local is_fg = (rec.overlap[j] >= self.fg_threshold) - local is_bg = (rec.overlap[j] >= self.bg_threshold[1] and - rec.overlap[j] < self.bg_threshold[2]) - if is_fg then - table.insert(fg_inds,j) - elseif is_bg then - table.insert(bg_inds,j) - end - end - - local fg_rois_per_this_image = math.min(#fg_inds,self.fg_) - if #fg_inds > 0 then - - end - -end diff --git a/nnf.lua b/nnf.lua index a2e7831..eee8d36 100644 --- a/nnf.lua +++ b/nnf.lua @@ -1,14 +1,15 @@ require 'nn' require 'image' -require 'inn' +--require 'inn' require 'xlua' nnf = {} torch.include('nnf','DataSetPascal.lua') torch.include('nnf','BatchProvider.lua') +torch.include('nnf','BatchProviderROI.lua') -torch.include('nnf','SPP.lua') +--torch.include('nnf','SPP.lua') torch.include('nnf','RCNN.lua') torch.include('nnf','Trainer.lua') From 6ac59e8dfd3dd2353986c2e3a4bb50ee20fdcd89 Mon Sep 17 00:00:00 2001 From: Francisco Massa 
Date: Sat, 15 Aug 2015 18:38:24 +0200 Subject: [PATCH 05/79] Basics of Fast-RCNN seems to be working --- BatchProviderROI.lua | 46 ++++++++++++++++++++++++++------------------ ROIPooling.lua | 21 ++++++++++++++++---- model.lua | 11 +++++++++++ nnf.lua | 1 + 4 files changed, 56 insertions(+), 23 deletions(-) diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index 6f28865..9b66e61 100644 --- a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -30,7 +30,7 @@ function BatchProviderROI:permuteIdx() self._cur = self._cur + self.imgs_per_batch local img_idx_end = imgs_per_batch - --[[ + local fg_windows = {} local bg_windows = {} for i=1,img_idx_end do @@ -48,40 +48,50 @@ function BatchProviderROI:permuteIdx() end end end - --]] + local opts = {img_idx=img_idx,img_idx_end=img_idx_end} return fg_windows,bg_windows,opts end -function BatchProviderROI:selectBBoxes(fg_windows,bg_windows) - local fg_w = {} - local bg_w = {} +function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales) + --local fg_w = {} + --local bg_w = {} + local rois = {} + local labels = {} for im=1,self.imgs_per_batch do - fg_w[im] = {} - bg_w[im] = {} + local im_scale = im_scales[im] + --fg_w[im] = {} + --bg_w[im] = {} local window_idx = torch.randperm(#bg_windows[im]) for i=1,math.min(self.bg_num_each,#bg_windows[im]) do local curr_idx = bg_windows[im][window_idx[i] ][1] local position = bg_windows[im][window_idx[i] ][2] - local dd = self.bboxes[curr_idx][0][position] - table.insert(bg_w[im],dd) + local dd = self.bboxes[curr_idx][0][position][{{2,5}}]--:totable() + dd:add(-1):mul(im_scale):add(1) + --table.insert(bg_w[im],dd) + table.insert(rois,{im,dd[1],dd[2],dd[3],dd[4]}) + table.insert(labels,self.bboxes[curr_idx][0][position][6]) end window_idx = torch.randperm(#fg_windows[im]) for i=1,math.min(self.fg_num_each,#fg_windows[im]) do local curr_idx = fg_windows[im][window_idx[i] ][1] local position = fg_windows[im][window_idx[i] ][2] - local dd = self.bboxes[curr_idx][1][position] - table.insert(fg_w[im],dd) + local dd = self.bboxes[curr_idx][1][position][{{2,5}}]--:totable() + dd:add(-1):mul(im_scale):add(1) + --table.insert(fg_w[im],dd) + table.insert(rois,{im,dd[1],dd[2],dd[3],dd[4]}) + table.insert(labels,self.bboxes[curr_idx][1][position][6]) end - end - - return fg_w,bg_w + rois = torch.FloatTensor(rois) + labels = torch.IntTensor(labels) + --return fg_w,bg_w + return rois, labels end local function getImages(self,img_ids,images) @@ -123,8 +133,6 @@ function BatchProviderROI:getBatch(batches,targets) self.fg_num_each = self.fg_fraction * self.batch_size self.bg_num_each = self.batch_size - self.fg_num_each - --self.fg_num_total = self.fg_num_each * self.iter_per_batch - --self.bg_num_total = self.bg_num_each * self.iter_per_batch local fg_windows,bg_windows,opts = self:permuteIdx() --local fg_w,bg_w = self:selectBBoxes(fg_windows,bg_windows) @@ -132,10 +140,10 @@ function BatchProviderROI:getBatch(batches,targets) local batches = batches or {torch.FloatTensor(),torch.FloatTensor()} local targets = targets or torch.IntTensor() - -- batches[1]:resize(self.batch_size,unpack(self.batch_dim)) local im_scales = getImages(self,opts.img_idx,batches[1]) - batches[2]:resize(self.batch_size,unpack(self.batch_dim)) - targets:resize(self.batch_size,self.target_dim) + local rois,labels = self:selectBBoxes(fg_windows,bg_windows,im_scales) + batches[2]:resizeAs(rois):copy(rois) + targets:resizeAs(labels):copy(labels) return batches, targets end diff --git a/ROIPooling.lua b/ROIPooling.lua index 23a5606..9b0cb6f 
100644 --- a/ROIPooling.lua +++ b/ROIPooling.lua @@ -5,9 +5,9 @@ function ROIPooling:__init(W,H) self.W = W self.H = H self.pooler = {}--nn.SpatialAdaptiveMaxPooling(W,H) + self.spatial_scale = 1 end --- not for batches for the moment function ROIPooling:updateOutput(input) local data = input[1] local rois = input[2] @@ -16,16 +16,19 @@ function ROIPooling:updateOutput(input) local ss = s:size(1) self.output:resize(num_rois,s[ss-2],self.H,self.W) + if not self._type then self._type = output:type() end + if #self.pooler < num_rois then local diff = num_rois - #self.pooler for i=1,diff do - table.insert(self.pooler,nn.SpatialAdaptiveMaxPooling(self.W,self.H)) + table.insert(self.pooler,nn.SpatialAdaptiveMaxPooling(self.W,self.H):type(self._type)) end end for i=1,num_rois do local roi = rois[i] - local im = data[{{},{roi[2],roi[4]},{roi[1],roi[3]}}] + local im_idx = roi[1] + local im = data[{im_idx,{},{roi[3],roi[5]},{roi[2],roi[4]}}] self.output[i] = self.pooler[i]:forward(im) end return self.output @@ -41,7 +44,8 @@ function ROIPooling:updateGradInput(input,gradOutput) for i=1,num_rois do local roi = rois[i] - local r = {{},{roi[2],roi[3]},{roi[1],roi[3]}} + local im_idx = roi[1] + local r = {im_idx,{},{roi[3],roi[5]},{roi[2],roi[4]}} local im = data[r] local g = self.pooler[i]:backward(im,gradOutput[i]) self.gradInput[r]:add(g) @@ -49,3 +53,12 @@ function ROIPooling:updateGradInput(input,gradOutput) return self.gradInput end + +function ROIPooling:type(type) + parent.type(self,type) + for i=1,#self.pooler do + self.pooler[i]:type(type) + end + self._type = type + return self +end diff --git a/model.lua b/model.lua index 9700f0b..36812e1 100644 --- a/model.lua +++ b/model.lua @@ -21,6 +21,17 @@ if opt.algo == 'RCNN' then elseif opt.algo == 'SPP' then features = model:get(1) classifier = model:get(3) +elseif opt.algo == 'FRCNN' then + local temp = nn.Sequential() + local features = model:get(1) + local classifier = model:get(3) + local prl = nn.ParallelTable() + prl:add(features) + prl:add(nn.Identity()) + temp:add(prl) + temp:add(nnf.ROIPooling(7,7)) + temp:add(nn.View(-1):setNumInputDims(3)) + temp:add(classifier) end -- 2. 
Create Criterion diff --git a/nnf.lua b/nnf.lua index eee8d36..2cf0592 100644 --- a/nnf.lua +++ b/nnf.lua @@ -11,6 +11,7 @@ torch.include('nnf','BatchProviderROI.lua') --torch.include('nnf','SPP.lua') torch.include('nnf','RCNN.lua') +torch.include('nnf','ROIPooling.lua') torch.include('nnf','Trainer.lua') torch.include('nnf','Tester.lua') From f9b514ca5efd6e4576649a57693ae21a07ef34f5 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sat, 15 Aug 2015 19:49:08 +0200 Subject: [PATCH 06/79] Almost working --- BatchProviderROI.lua | 4 ++-- ROIPooling.lua | 12 ++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index 9b66e61..5492853 100644 --- a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -138,12 +138,12 @@ function BatchProviderROI:getBatch(batches,targets) --local fg_w,bg_w = self:selectBBoxes(fg_windows,bg_windows) local batches = batches or {torch.FloatTensor(),torch.FloatTensor()} - local targets = targets or torch.IntTensor() + local targets = targets or torch.FloatTensor() local im_scales = getImages(self,opts.img_idx,batches[1]) local rois,labels = self:selectBBoxes(fg_windows,bg_windows,im_scales) batches[2]:resizeAs(rois):copy(rois) - targets:resizeAs(labels):copy(labels) + targets:resize(labels:size()):copy(labels) return batches, targets end diff --git a/ROIPooling.lua b/ROIPooling.lua index 9b0cb6f..a3588b1 100644 --- a/ROIPooling.lua +++ b/ROIPooling.lua @@ -8,14 +8,26 @@ function ROIPooling:__init(W,H) self.spatial_scale = 1 end +function ROIPooling:setSpatialScale(scale) + self.spatial_scale = scale + return self +end + function ROIPooling:updateOutput(input) local data = input[1] local rois = input[2] + local num_rois = rois:size(1) local s = data:size() local ss = s:size(1) self.output:resize(num_rois,s[ss-2],self.H,self.W) + rois[{{},{2,5}}]:add(-1):mul(self.spatial_scale):add(1):round() + rois[{{},2}]:cmin(s[ss]) + rois[{{},3}]:cmin(s[ss-1]) + rois[{{},4}]:cmin(s[ss]) + rois[{{},5}]:cmin(s[ss-1]) + if not self._type then self._type = output:type() end if #self.pooler < num_rois then From 22e7b0e7c2ef3251c8947099e2db1d34542cff10 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 16 Aug 2015 21:14:23 +0200 Subject: [PATCH 07/79] Basic test for FRCNN --- test_frcnn.lua | 107 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 test_frcnn.lua diff --git a/test_frcnn.lua b/test_frcnn.lua new file mode 100644 index 0000000..f5cb124 --- /dev/null +++ b/test_frcnn.lua @@ -0,0 +1,107 @@ +require 'nnf' + +dt = torch.load('pascal_2007_train.t7') +ds = nnf.DataSetPascal{image_set='train', + datadir='/home/francisco/work/datasets/VOCdevkit', + roidbdir='/home/francisco/work/datasets/rcnn/selective_search_data' + } +if false then + ds.roidb = {} + for i=1,ds:size() do + ds.roidb[i] = torch.IntTensor(10,4):random(1,5) + ds.roidb[i][{{},{3,4}}]:add(6) + end +else + ds.roidb = dt.roidb +end + +bp = nnf.BatchProviderROI(ds) +bp:setupData() + +--------------------------------------------------------------------------------------- +-- model +--------------------------------------------------------------------------------------- +do + model = nn.Sequential() + local features = nn.Sequential() + local classifier = nn.Sequential() + + features:add(nn.SpatialConvolutionMM(3,96,11,11,4,4,5,5)) + features:add(nn.ReLU(true)) + features:add(nn.SpatialConvolutionMM(96,128,5,5,2,2,2,2)) + features:add(nn.ReLU(true)) + features:add(nn.SpatialMaxPooling(2,2,2,2)) + + 
classifier:add(nn.Linear(128*7*7,1024)) + classifier:add(nn.ReLU(true)) + classifier:add(nn.Dropout(0.5)) + classifier:add(nn.Linear(1024,21)) + + local prl = nn.ParallelTable() + prl:add(features) + prl:add(nn.Identity()) + model:add(prl) + model:add(nnf.ROIPooling(7,7):setSpatialScale(1/16)) + model:add(nn.View(-1):setNumInputDims(3)) + model:add(classifier) + +end + +parameters,gradParameters = model:getParameters() + +optimState = {learningRate = 1e-2, weightDecay = 0.0005, momentum = 0.9, + learningRateDecay = 0} + +-------------------------------------------------------------------------- +-- training +-------------------------------------------------------------------------- + +model:float() +model:training() + +criterion = nn.CrossEntropyCriterion():float() + +max_iter = 10 + +function train() + local err = 0 + for i=1,max_iter do + xlua.progress(i,max_iter) + inputs,target = bp:getBatch(inputs,target) + local batchSize = target:size(1) + + local feval = function(x) + if x ~= parameters then + parameters:copy(x) + end + gradParameters:zero() + + local outputs = model:forward(inputs) + + local f = criterion:forward(outputs,target) + local df_do = criterion:backward(outputs,target) + + model:backward(inputs,df_do) + + if normalize then + gradParameters:div(batchSize) + f = f/batchSize + end + + return f,gradParameters + end + + local x,fx = optim.sgd(feval,parameters,optimState) + err = err + fx[1] + end + print('Training error: '..err/max_iter) +end + +train() + +if false then + m = nnf.ROIPooling(50,50):float() + o = m:forward(batches) + g = m:backward(batches,o) +end + From bc6c6818c3af18ee9eb0c90fbdae7a53c1548728 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Mon, 17 Aug 2015 23:39:36 +0200 Subject: [PATCH 08/79] Fix bug in ROIPooling --- ROIPooling.lua | 10 +++++----- test_frcnn.lua | 8 +++++++- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/ROIPooling.lua b/ROIPooling.lua index a3588b1..0345628 100644 --- a/ROIPooling.lua +++ b/ROIPooling.lua @@ -6,6 +6,7 @@ function ROIPooling:__init(W,H) self.H = H self.pooler = {}--nn.SpatialAdaptiveMaxPooling(W,H) self.spatial_scale = 1 + self.gradInput = {torch.Tensor()} end function ROIPooling:setSpatialScale(scale) @@ -41,7 +42,7 @@ function ROIPooling:updateOutput(input) local roi = rois[i] local im_idx = roi[1] local im = data[{im_idx,{},{roi[3],roi[5]},{roi[2],roi[4]}}] - self.output[i] = self.pooler[i]:forward(im) + self.output[i] = self.pooler[i]:updateOutput(im) end return self.output end @@ -52,18 +53,17 @@ function ROIPooling:updateGradInput(input,gradOutput) local num_rois = rois:size(1) local s = data:size() local ss = s:size(1) - self.gradInput:resizeAs(data):zero() + self.gradInput[1]:resizeAs(data):zero() for i=1,num_rois do local roi = rois[i] local im_idx = roi[1] local r = {im_idx,{},{roi[3],roi[5]},{roi[2],roi[4]}} local im = data[r] - local g = self.pooler[i]:backward(im,gradOutput[i]) - self.gradInput[r]:add(g) + local g = self.pooler[i]:updateGradInput(im,gradOutput[i]) + self.gradInput[1][r]:add(g) end return self.gradInput - end function ROIPooling:type(type) diff --git a/test_frcnn.lua b/test_frcnn.lua index f5cb124..cbca5ae 100644 --- a/test_frcnn.lua +++ b/test_frcnn.lua @@ -15,8 +15,14 @@ else ds.roidb = dt.roidb end +if false then bp = nnf.BatchProviderROI(ds) bp:setupData() +else + bp = nnf.BatchProviderROI(ds) + local temp = torch.load('pascal_2007_train_bp.t7') + bp.bboxes = temp.bboxes +end --------------------------------------------------------------------------------------- -- model 
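
-- Sketch (not from the patches): the fix above makes ROIPooling's gradInput a
-- table {tensor}, mirroring the {data,rois} table input as nn modules expect,
-- with no gradient flowing to the rois. A quick structural check, assuming
-- rois in {img_idx,x1,y1,x2,y2} format and a module typed before first use:
--
--   local m    = nnf.ROIPooling(3,3):float()
--   local data = torch.rand(1,8,16,16):float()   -- 1 image, 8 feature maps
--   local rois = torch.FloatTensor{{1,1,1,8,8},{1,4,4,12,12}}
--   local out  = m:forward{data,rois}                  -- 2x8x3x3
--   local gin  = m:backward({data,rois}, out:clone())
--   assert(type(gin) == 'table' and gin[1]:isSameSizeAs(data))
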
@@ -61,7 +67,7 @@ model:training() criterion = nn.CrossEntropyCriterion():float() -max_iter = 10 +max_iter = 20 function train() local err = 0 From f6d53813be5e67fafc3a8d30959aa74754cf0ee3 Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Tue, 18 Aug 2015 18:09:54 +0200 Subject: [PATCH 09/79] Improve training script for frcnn --- test_frcnn.lua | 114 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 82 insertions(+), 32 deletions(-) diff --git a/test_frcnn.lua b/test_frcnn.lua index cbca5ae..61455d5 100644 --- a/test_frcnn.lua +++ b/test_frcnn.lua @@ -1,10 +1,18 @@ require 'nnf' dt = torch.load('pascal_2007_train.t7') -ds = nnf.DataSetPascal{image_set='train', - datadir='/home/francisco/work/datasets/VOCdevkit', - roidbdir='/home/francisco/work/datasets/rcnn/selective_search_data' - } +if false then + ds = nnf.DataSetPascal{image_set='train', + datadir='/home/francisco/work/datasets/VOCdevkit', + roidbdir='/home/francisco/work/datasets/rcnn/selective_search_data' + } +else + ds = nnf.DataSetPascal{image_set='train', + datadir='datasets/VOCdevkit', + roidbdir='data/selective_search_data' + } +end + if false then ds.roidb = {} for i=1,ds:size() do @@ -15,9 +23,9 @@ else ds.roidb = dt.roidb end -if false then -bp = nnf.BatchProviderROI(ds) -bp:setupData() +if true then + bp = nnf.BatchProviderROI(ds) + bp:setupData() else bp = nnf.BatchProviderROI(ds) local temp = torch.load('pascal_2007_train_bp.t7') @@ -28,52 +36,89 @@ end -- model --------------------------------------------------------------------------------------- do + model = nn.Sequential() local features = nn.Sequential() local classifier = nn.Sequential() + + if false then + features:add(nn.SpatialConvolutionMM(3,96,11,11,4,4,5,5)) + features:add(nn.ReLU(true)) + features:add(nn.SpatialConvolutionMM(96,128,5,5,2,2,2,2)) + features:add(nn.ReLU(true)) + features:add(nn.SpatialMaxPooling(2,2,2,2)) + + classifier:add(nn.Linear(128*7*7,1024)) + classifier:add(nn.ReLU(true)) + classifier:add(nn.Dropout(0.5)) + classifier:add(nn.Linear(1024,21)) - features:add(nn.SpatialConvolutionMM(3,96,11,11,4,4,5,5)) - features:add(nn.ReLU(true)) - features:add(nn.SpatialConvolutionMM(96,128,5,5,2,2,2,2)) - features:add(nn.ReLU(true)) - features:add(nn.SpatialMaxPooling(2,2,2,2)) + else + require 'loadcaffe' +-- local rcnnfold = '/home/francisco/work/libraries/rcnn/' +-- local base_model = loadcaffe.load( +-- rcnnfold..'model-defs/pascal_finetune_deploy.prototxt', +-- rcnnfold..'data/caffe_nets/finetune_voc_2012_train_iter_70k', +-- 'cudnn') + + local rcnnfold = '/home/francisco/work/libraries/caffe/examples/imagenet/' + local base_model = loadcaffe.load( + rcnnfold..'imagenet_deploy.prototxt', + rcnnfold..'caffe_reference_imagenet_model', + 'cudnn') + + + for i=1,14 do + features:add(base_model:get(i):clone()) + end - classifier:add(nn.Linear(128*7*7,1024)) - classifier:add(nn.ReLU(true)) - classifier:add(nn.Dropout(0.5)) - classifier:add(nn.Linear(1024,21)) + for i=17,22 do + classifier:add(base_model:get(i):clone()) + end + classifier:add(nn.Linear(4096,21):cuda()) + + collectgarbage() + end + collectgarbage() local prl = nn.ParallelTable() prl:add(features) prl:add(nn.Identity()) model:add(prl) - model:add(nnf.ROIPooling(7,7):setSpatialScale(1/16)) + model:add(nnf.ROIPooling(6,6):setSpatialScale(1/16)) + --model:add(inn.ROIPooling(6,6):setSpatialScale(1/16)) model:add(nn.View(-1):setNumInputDims(3)) model:add(classifier) end - +print(model) parameters,gradParameters = model:getParameters() -optimState = {learningRate = 1e-2, 
weightDecay = 0.0005, momentum = 0.9, +optimState = {learningRate = 1e-3, weightDecay = 0.0005, momentum = 0.9, learningRateDecay = 0} -------------------------------------------------------------------------- -- training -------------------------------------------------------------------------- -model:float() +model:cuda() model:training() -criterion = nn.CrossEntropyCriterion():float() +criterion = nn.CrossEntropyCriterion():cuda() + +display_iter = 20 -max_iter = 20 +inputs = {torch.CudaTensor(),torch.FloatTensor()} +target = torch.CudaTensor() function train() local err = 0 - for i=1,max_iter do - xlua.progress(i,max_iter) - inputs,target = bp:getBatch(inputs,target) + for i=1,display_iter do + xlua.progress(i,display_iter) + inputs0,target0 = bp:getBatch(inputs0,target0) + inputs[1]:resize(inputs0[1]:size()):copy(inputs0[1]) + inputs[2]:resize(inputs0[2]:size()):copy(inputs0[2]) + target:resize(target0:size()):copy(target0) local batchSize = target:size(1) local feval = function(x) @@ -100,14 +145,19 @@ function train() local x,fx = optim.sgd(feval,parameters,optimState) err = err + fx[1] end - print('Training error: '..err/max_iter) + print('Training error: '..err/display_iter) end -train() +stepsize = 30000 -if false then - m = nnf.ROIPooling(50,50):float() - o = m:forward(batches) - g = m:backward(batches,o) -end +num_iter = 3000 +for i=1,num_iter do + print(('Iteration: %d/%d'):format(i,num_iter)) + if i%(stepsize/display_iter) == 0 then + optimState.learningRate = optimState.learningRate/10 + end + + train() + +end From 590cd806c5bd92dafbbd51a226bf5a2153948942 Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Wed, 19 Aug 2015 21:41:59 +0200 Subject: [PATCH 10/79] Cleaning up and fix in test --- BatchProviderROI.lua | 14 ++------------ test_frcnn.lua | 20 ++++++++++++++++++-- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index 5492853..f6fdf44 100644 --- a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -55,24 +55,16 @@ function BatchProviderROI:permuteIdx() end function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales) - --local fg_w = {} - --local bg_w = {} - local rois = {} local labels = {} for im=1,self.imgs_per_batch do - local im_scale = im_scales[im] - --fg_w[im] = {} - --bg_w[im] = {} - local window_idx = torch.randperm(#bg_windows[im]) for i=1,math.min(self.bg_num_each,#bg_windows[im]) do local curr_idx = bg_windows[im][window_idx[i] ][1] local position = bg_windows[im][window_idx[i] ][2] - local dd = self.bboxes[curr_idx][0][position][{{2,5}}]--:totable() + local dd = self.bboxes[curr_idx][0][position][{{2,5}}] dd:add(-1):mul(im_scale):add(1) - --table.insert(bg_w[im],dd) table.insert(rois,{im,dd[1],dd[2],dd[3],dd[4]}) table.insert(labels,self.bboxes[curr_idx][0][position][6]) end @@ -81,16 +73,14 @@ function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales) for i=1,math.min(self.fg_num_each,#fg_windows[im]) do local curr_idx = fg_windows[im][window_idx[i] ][1] local position = fg_windows[im][window_idx[i] ][2] - local dd = self.bboxes[curr_idx][1][position][{{2,5}}]--:totable() + local dd = self.bboxes[curr_idx][1][position][{{2,5}}] dd:add(-1):mul(im_scale):add(1) - --table.insert(fg_w[im],dd) table.insert(rois,{im,dd[1],dd[2],dd[3],dd[4]}) table.insert(labels,self.bboxes[curr_idx][1][position][6]) end end rois = torch.FloatTensor(rois) labels = torch.IntTensor(labels) - --return fg_w,bg_w return rois, labels end diff --git a/test_frcnn.lua b/test_frcnn.lua index 
61455d5..fb54005 100644
--- a/test_frcnn.lua
+++ b/test_frcnn.lua
@@ -23,11 +23,16 @@ else
   ds.roidb = dt.roidb
 end
 
+local image_transformer= nnf.ImageTransformer{mean_pix={103.939, 116.779, 123.68},
+                                              raw_scale = 255,
+                                              swap = {3,2,1}}
 if true then
   bp = nnf.BatchProviderROI(ds)
+  bp.image_transformer = image_transformer
   bp:setupData()
 else
   bp = nnf.BatchProviderROI(ds)
+  bp.image_transformer = image_transformer
   local temp = torch.load('pascal_2007_train_bp.t7')
   bp.bboxes = temp.bboxes
 end
@@ -94,13 +99,17 @@ end
 print(model)
 parameters,gradParameters = model:getParameters()
 
-optimState = {learningRate = 1e-3, weightDecay = 0.0005, momentum = 0.9,
+optimState = {learningRate = 1e-4, weightDecay = 0.0005, momentum = 0.9,
               learningRateDecay = 0}
 
 --------------------------------------------------------------------------
 -- training
 --------------------------------------------------------------------------
 
+confusion_matrix = optim.ConfusionMatrix(21)
+
+savedModel = model:clone('weight','bias','running_mean','running_std')
+
 model:cuda()
 model:training()
@@ -138,6 +147,8 @@ function train()
         gradParameters:div(batchSize)
         f = f/batchSize
       end
+
+      confusion_matrix:batchAdd(outputs,target)
 
       return f,gradParameters
     end
@@ -157,7 +168,12 @@ for i=1,num_iter do
   if i%(stepsize/display_iter) == 0 then
     optimState.learningRate = optimState.learningRate/10
   end
+
+  confusion_matrix:zero()
 
   train()
-
+  print(confusion_matrix)
+  if i%100 == 0 then
+    torch.save(paths.concat('cachedir','frcnn_t1.t7'),savedModel)
+  end
 end

From 19a8e7f95b90641e7e3a812463841353b55526bf Mon Sep 17 00:00:00 2001
From: fsuzanomassa
Date: Thu, 20 Aug 2015 18:35:55 +0200
Subject: [PATCH 11/79] Add gitignore

---
 .gitignore | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .gitignore

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ae7e697
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+*~
+*.swp
+cachedir/*

From e9e55e79cab717f512e3d4efb40352b54271b3ec Mon Sep 17 00:00:00 2001
From: fsuzanomassa
Date: Sat, 22 Aug 2015 15:55:41 +0200
Subject: [PATCH 12/79] Fix uninitialized memory in BatchProviderROI

---
 BatchProviderROI.lua | 21 +++++++------
 test_frcnn.lua       | 71 ++++++++++++++++++++++++++++++++++++--------
 2 files changed, 70 insertions(+), 22 deletions(-)

diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua
index f6fdf44..2260f3e 100644
--- a/BatchProviderROI.lua
+++ b/BatchProviderROI.lua
@@ -12,10 +12,8 @@ end
 
 -- setup is the same
 
 function BatchProviderROI:permuteIdx()
-  local fg_num_each = self.fg_num_each
-  local bg_num_each = self.bg_num_each
-  local fg_num_total = self.fg_num_total
-  local bg_num_total = self.bg_num_total
+  --local fg_num_total = self.fg_num_total
+  --local bg_num_total = self.bg_num_total
   local total_img = self.dataset:size()
   local imgs_per_batch = self.imgs_per_batch
@@ -26,8 +24,8 @@
     self._cur = 1
   end
 
-  local img_idx = self._perm[{{self._cur,self._cur + self.imgs_per_batch - 1}}]
-  self._cur = self._cur + self.imgs_per_batch
+  local img_idx = self._perm[{{self._cur,self._cur + self.imgs_per_batch - 1}}]
+  self._cur = self._cur + self.imgs_per_batch
 
   local img_idx_end = imgs_per_batch
 
@@ -55,12 +53,16 @@ end
 
 function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales)
+  local fg_num_each = self.fg_num_each
+  local bg_num_each = self.bg_num_each
+
   local rois = {}
   local labels = {}
   for im=1,self.imgs_per_batch do
     local im_scale = im_scales[im]
     local window_idx = torch.randperm(#bg_windows[im])
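
-- Note (worked numbers, not from the patches): getBatch derives the sampling
-- quotas that selectBBoxes consumes here. With batch_size=128 as defaulted in
-- opts.lua and a Fast-RCNN-style fg_fraction of 0.25 (an assumption; the
-- opt.fg_frac default is not shown in this series):
--
--   fg_num_each = 0.25 * 128   --> 32 foreground rois per batch
--   bg_num_each = 128 - 32     --> 96 background rois per batch
--   -- patch 16 below further divides these by imgs_per_batch (2), i.e.
--   -- every batch samples 16 fg + 48 bg windows from each of its 2 images
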
- for i=1,math.min(self.bg_num_each,#bg_windows[im]) do + local end_idx = math.min(bg_num_each,#bg_windows[im]) + for i=1,end_idx do local curr_idx = bg_windows[im][window_idx[i] ][1] local position = bg_windows[im][window_idx[i] ][2] local dd = self.bboxes[curr_idx][0][position][{{2,5}}] @@ -70,7 +72,8 @@ function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales) end window_idx = torch.randperm(#fg_windows[im]) - for i=1,math.min(self.fg_num_each,#fg_windows[im]) do + local end_idx = math.min(fg_num_each,#fg_windows[im]) + for i=1,end_idx do local curr_idx = fg_windows[im][window_idx[i] ][1] local position = fg_windows[im][window_idx[i] ][2] local dd = self.bboxes[curr_idx][1][position][{{2,5}}] @@ -110,7 +113,7 @@ local function getImages(self,img_ids,images) -- create single tensor with all images, padding with zero for different sizes im_sizes = torch.IntTensor(im_sizes) local max_shape = im_sizes:max(1)[1] - images:resize(num_images,3,max_shape[1],max_shape[2]) + images:resize(num_images,3,max_shape[1],max_shape[2]):zero() for i=1,num_images do images[i][{{},{1,imgs[i]:size(2)},{1,imgs[i]:size(3)}}]:copy(imgs[i]) end diff --git a/test_frcnn.lua b/test_frcnn.lua index fb54005..109c7cd 100644 --- a/test_frcnn.lua +++ b/test_frcnn.lua @@ -1,4 +1,8 @@ require 'nnf' +require 'inn' +require 'cudnn' + +cutorch.setDevice(1) dt = torch.load('pascal_2007_train.t7') if false then @@ -19,16 +23,17 @@ if false then ds.roidb[i] = torch.IntTensor(10,4):random(1,5) ds.roidb[i][{{},{3,4}}]:add(6) end -else +elseif true then ds.roidb = dt.roidb end -local image_transformer= nnf.ImageTransformer{mean_pix={103.939, 116.779, 123.68}, +local image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717},--{103.939, 116.779, 123.68}, raw_scale = 255, swap = {3,2,1}} if true then bp = nnf.BatchProviderROI(ds) bp.image_transformer = image_transformer + bp.bg_threshold = {0.1,0.5} bp:setupData() else bp = nnf.BatchProviderROI(ds) @@ -37,6 +42,17 @@ else bp.bboxes = temp.bboxes end + +if false then + local mytest = nnf.ROIPooling(50,50):float() + function do_mytest() + local input0,target0 = bp:getBatch(input0,target0) + local o = mytest:forward(input0) + return input0,target0,o + end + --input0,target0,o = do_mytest() +end + --------------------------------------------------------------------------------------- -- model --------------------------------------------------------------------------------------- @@ -58,7 +74,7 @@ do classifier:add(nn.Dropout(0.5)) classifier:add(nn.Linear(1024,21)) - else + elseif false then require 'loadcaffe' -- local rcnnfold = '/home/francisco/work/libraries/rcnn/' -- local base_model = loadcaffe.load( @@ -83,6 +99,23 @@ do classifier:add(nn.Linear(4096,21):cuda()) collectgarbage() + + else + local fold = 'data/models/imagenet_models/alexnet/' + local m1 = torch.load(fold..'features.t7') + local m2 = torch.load(fold..'top.t7') + + for i=1,14 do + features:add(m1:get(i):clone()) + end + + for i=2,7 do + classifier:add(m2:get(i):clone()) + end + local linear = nn.Linear(4096,21):cuda() + linear.weight:normal(0,0.01) + linear.bias:zero() + classifier:add(linear) end collectgarbage() @@ -90,16 +123,18 @@ do prl:add(features) prl:add(nn.Identity()) model:add(prl) - model:add(nnf.ROIPooling(6,6):setSpatialScale(1/16)) - --model:add(inn.ROIPooling(6,6):setSpatialScale(1/16)) + --model:add(nnf.ROIPooling(6,6):setSpatialScale(1/16)) + model:add(inn.ROIPooling(6,6):setSpatialScale(1/16)) model:add(nn.View(-1):setNumInputDims(3)) model:add(classifier) end 
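
-- Sketch (shapes assumed for illustration): the model assembled above is the
-- Fast-RCNN head. A ParallelTable runs the image batch through the conv trunk
-- while the rois pass through an Identity; ROIPooling crops each roi to a
-- fixed 6x6 grid at 1/16 resolution; a View flattens it for the classifier:
--
--   {imgs: Nx3xHxW, rois: Rx5}
--     -> ParallelTable   -> {feats: Nx256x(H/16)x(W/16), rois}  -- AlexNet conv5
--     -> ROIPooling(6,6) -> Rx256x6x6
--     -> View(-1)        -> Rx9216
--     -> classifier      -> Rx21 class scores
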
print(model) + +model:cuda() parameters,gradParameters = model:getParameters() -optimState = {learningRate = 1e-4, weightDecay = 0.0005, momentum = 0.9, +optimState = {learningRate = 1e-3, weightDecay = 0.0005, momentum = 0.9, learningRateDecay = 0} -------------------------------------------------------------------------- @@ -108,16 +143,20 @@ optimState = {learningRate = 1e-4, weightDecay = 0.0005, momentum = 0.9, confusion_matrix = optim.ConfusionMatrix(21) -savedModel = model:clone('weight','bias','running_mean','running_std') -model:cuda() model:training() +savedModel = model:clone('weight','bias','running_mean','running_std') + criterion = nn.CrossEntropyCriterion():cuda() +--criterion.nll.sizeAverage = false + +--normalize = true display_iter = 20 -inputs = {torch.CudaTensor(),torch.FloatTensor()} +--inputs = {torch.CudaTensor(),torch.FloatTensor()} +inputs = {torch.CudaTensor(),torch.CudaTensor()} target = torch.CudaTensor() function train() @@ -159,9 +198,12 @@ function train() print('Training error: '..err/display_iter) end +epoch_size = math.ceil(ds:size()/bp.imgs_per_batch) stepsize = 30000 +print_step = 10 +num_iter = 40000/display_iter--3000 -num_iter = 3000 +confusion_matrix:zero() for i=1,num_iter do print(('Iteration: %d/%d'):format(i,num_iter)) @@ -169,10 +211,13 @@ for i=1,num_iter do optimState.learningRate = optimState.learningRate/10 end - confusion_matrix:zero() - train() - print(confusion_matrix) + + if i%print_step == 0 then + print(confusion_matrix) + confusion_matrix:zero() + end + if i%100 == 0 then torch.save(paths.concat('cachedir','frcnn_t1.t7'),savedModel) end From 077c169a2ff29b9806ba9770574567e5a9467538 Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Sat, 22 Aug 2015 16:34:08 +0200 Subject: [PATCH 13/79] Fix bug in BatchProviderROI --- BatchProviderROI.lua | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index 2260f3e..e1e1655 100644 --- a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -65,7 +65,7 @@ function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales) for i=1,end_idx do local curr_idx = bg_windows[im][window_idx[i] ][1] local position = bg_windows[im][window_idx[i] ][2] - local dd = self.bboxes[curr_idx][0][position][{{2,5}}] + local dd = self.bboxes[curr_idx][0][position][{{2,5}}]:clone() dd:add(-1):mul(im_scale):add(1) table.insert(rois,{im,dd[1],dd[2],dd[3],dd[4]}) table.insert(labels,self.bboxes[curr_idx][0][position][6]) @@ -76,7 +76,7 @@ function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales) for i=1,end_idx do local curr_idx = fg_windows[im][window_idx[i] ][1] local position = fg_windows[im][window_idx[i] ][2] - local dd = self.bboxes[curr_idx][1][position][{{2,5}}] + local dd = self.bboxes[curr_idx][1][position][{{2,5}}]:clone() dd:add(-1):mul(im_scale):add(1) table.insert(rois,{im,dd[1],dd[2],dd[3],dd[4]}) table.insert(labels,self.bboxes[curr_idx][1][position][6]) From d1d9f9c819dd330d016ea43c5fb0b9e7c15a6251 Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Sun, 23 Aug 2015 13:49:37 +0200 Subject: [PATCH 14/79] Add flip in training and test code --- BatchProviderROI.lua | 30 +++++-- ImageDetect.lua | 88 ++++++++++++++++++++ Tester_FRCNN.lua | 192 +++++++++++++++++++++++++++++++++++++++++++ nnf.lua | 2 + test_frcnn.lua | 28 +++++-- 5 files changed, 325 insertions(+), 15 deletions(-) create mode 100644 ImageDetect.lua create mode 100644 Tester_FRCNN.lua diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index e1e1655..9844ee7 100644 --- 
a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -46,13 +46,13 @@ function BatchProviderROI:permuteIdx() end end end - - local opts = {img_idx=img_idx,img_idx_end=img_idx_end} + local do_flip = torch.FloatTensor(imgs_per_batch):random(0,1) + local opts = {img_idx=img_idx,img_idx_end=img_idx_end,do_flip=do_flip} return fg_windows,bg_windows,opts end -function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales) +function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales,do_flip,im_sizes) local fg_num_each = self.fg_num_each local bg_num_each = self.bg_num_each @@ -62,11 +62,18 @@ function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales) local im_scale = im_scales[im] local window_idx = torch.randperm(#bg_windows[im]) local end_idx = math.min(bg_num_each,#bg_windows[im]) + local flip = do_flip[im] == 1 + local im_size = im_sizes[im] for i=1,end_idx do local curr_idx = bg_windows[im][window_idx[i] ][1] local position = bg_windows[im][window_idx[i] ][2] local dd = self.bboxes[curr_idx][0][position][{{2,5}}]:clone() dd:add(-1):mul(im_scale):add(1) + if flip then + local tt = dd[1] + dd[1] = im_size[2]-dd[3] +1 + dd[3] = im_size[2]-tt +1 + end table.insert(rois,{im,dd[1],dd[2],dd[3],dd[4]}) table.insert(labels,self.bboxes[curr_idx][0][position][6]) end @@ -78,6 +85,11 @@ function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales) local position = fg_windows[im][window_idx[i] ][2] local dd = self.bboxes[curr_idx][1][position][{{2,5}}]:clone() dd:add(-1):mul(im_scale):add(1) + if flip then + local tt = dd[1] + dd[1] = im_size[2]-dd[3] +1 + dd[3] = im_size[2]-tt +1 + end table.insert(rois,{im,dd[1],dd[2],dd[3],dd[4]}) table.insert(labels,self.bboxes[curr_idx][1][position][6]) end @@ -87,7 +99,7 @@ function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales) return rois, labels end -local function getImages(self,img_ids,images) +local function getImages(self,img_ids,images,do_flip) local dataset = self.dataset local num_images = img_ids:size(1) @@ -98,6 +110,10 @@ local function getImages(self,img_ids,images) for i=1,num_images do local im = dataset:getImage(img_ids[i]) im = self.image_transformer:preprocess(im) + local flip = do_flip[i] == 1 + if flip then + im = image.hflip(im) + end local im_size = im[1]:size() local im_size_min = math.min(im_size[1],im_size[2]) local im_size_max = math.max(im_size[1],im_size[2]) @@ -117,7 +133,7 @@ local function getImages(self,img_ids,images) for i=1,num_images do images[i][{{},{1,imgs[i]:size(2)},{1,imgs[i]:size(3)}}]:copy(imgs[i]) end - return im_scales + return im_scales,im_sizes end @@ -133,8 +149,8 @@ function BatchProviderROI:getBatch(batches,targets) local batches = batches or {torch.FloatTensor(),torch.FloatTensor()} local targets = targets or torch.FloatTensor() - local im_scales = getImages(self,opts.img_idx,batches[1]) - local rois,labels = self:selectBBoxes(fg_windows,bg_windows,im_scales) + local im_scales, im_sizes = getImages(self,opts.img_idx,batches[1],opts.do_flip) + local rois,labels = self:selectBBoxes(fg_windows,bg_windows,im_scales,opts.do_flip, im_sizes) batches[2]:resizeAs(rois):copy(rois) targets:resize(labels:size()):copy(labels) diff --git a/ImageDetect.lua b/ImageDetect.lua new file mode 100644 index 0000000..27bf8e2 --- /dev/null +++ b/ImageDetect.lua @@ -0,0 +1,88 @@ +local ImageDetect = torch.class('nnf.ImageDetect') + +function ImageDetect:__init(model) + self.model = model + self.image_transformer = nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, + 
raw_scale = 255, + swap = {3,2,1}} + self.scale = {600} + self.max_size = 1000 + self.sm = nn.SoftMax():cuda() +end + + +local function getImages(self,images,im) + local num_scales = #self.scale + + local imgs = {} + local im_sizes = {} + local im_scales = {} + + im = self.image_transformer:preprocess(im) + + local im_size = im[1]:size() + local im_size_min = math.min(im_size[1],im_size[2]) + local im_size_max = math.max(im_size[1],im_size[2]) + for i=1,num_scales do + local im_scale = self.scale[i]/im_size_min + if torch.round(im_scale*im_size_max) > self.max_size then + im_scale = self.max_size/im_size_max + end + local im_s = {im_size[1]*im_scale,im_size[2]*im_scale} + table.insert(imgs,image.scale(im,im_s[2],im_s[1])) + table.insert(im_sizes,im_s) + table.insert(im_scales,im_scale) + end + -- create single tensor with all images, padding with zero for different sizes + im_sizes = torch.IntTensor(im_sizes) + local max_shape = im_sizes:max(1)[1] + images:resize(num_scales,3,max_shape[1],max_shape[2]):zero() + for i=1,num_scales do + images[i][{{},{1,imgs[i]:size(2)},{1,imgs[i]:size(3)}}]:copy(imgs[i]) + end + return im_scales +end + +local function project_im_rois(im_rois,scales) + local levels + local rois = torch.FloatTensor() + if #scales > 1 then + local scales = torch.FloatTensor(scales) + local widths = im_rois[{{},3}] - im_rois[{{},1}] + 1 + local heights = im_rois[{{},4}] - im_rois[{{}, 2}] + 1 + + local areas = widths * heights + local scaled_areas = areas:view(-1,1) * torch.pow(scales:view(1,-1),2) + local diff_areas = torch.abs(scaled_areas - 224 * 224) + levels = select(2, diff_areas:min(2)) + else + levels = torch.FloatTensor() + rois:resize(im_rois:size(1),5) + rois[{{},1}]:fill(1) + rois[{{},{2,5}}]:copy(im_rois):add(-1):mul(scales[1]):add(1) + end + + return rois + +end + +-- supposes boxes is in [x1,y1,x2,y2] format +function ImageDetect:detect(im,boxes) + local inputs = {torch.FloatTensor(),torch.FloatTensor()} + local im_scales = getImages(self,inputs[1],im) + inputs[2] = project_im_rois(boxes,im_scales) + + local inputs_cuda = {torch.CudaTensor(),torch.CudaTensor()} + inputs_cuda[1]:resize(inputs[1]:size()):copy(inputs[1]) + inputs_cuda[2]:resize(inputs[2]:size()):copy(inputs[2]) + local output0 = self.model:forward(inputs_cuda) + local output = self.sm:forward(output0):float() + --[[ + for i=1,#im_scales do + local dd = boxes:clone() + dd:add(-1):mul(im_scale[i]):add(1) + + end + --]] + return output +end diff --git a/Tester_FRCNN.lua b/Tester_FRCNN.lua new file mode 100644 index 0000000..7e8a139 --- /dev/null +++ b/Tester_FRCNN.lua @@ -0,0 +1,192 @@ +local utils = paths.dofile('utils.lua') +local nms = paths.dofile('nms.lua') + +local keep_top_k = utils.keep_top_k +local VOCevaldet = utils.VOCevaldet + +local Tester = torch.class('nnf.Tester_FRCNN') + +function Tester:__init(module,feat_provider) + self.dataset = feat_provider.dataset + self.module = module + self.feat_provider = feat_provider + + self.feat_dim = {256*50} + self.max_batch_size = 4000 + + self.cachefolder = nil + self.cachename = nil + self.suffix = '' + self.verbose = true +end + +-- improve it ! 
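
-- Sketch of the intended test-time use (mirroring Tester:test further below):
-- ImageDetect preprocesses the image, rescales it with the same
-- shorter-side-600 rule, projects the proposals with (x-1)*scale+1, forwards
-- {image,rois} through the network and softmaxes the scores:
--
--   local detec  = nnf.ImageDetect(model)
--   local im     = dataset:getImage(i)
--   local boxes  = dataset:getROIBoxes(i):float()  -- Rx4 [x1,y1,x2,y2] proposals
--   local scores = detec:detect(im, boxes)         -- Rx21 softmax probabilities,
--                                                  -- matching the 21-class setup
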
+function Tester:validate(criterion) + + local tname = paths.concat(self.cachefolder,self.cachename) + local valData + if paths.filep(tname) then + valData = torch.load(tname) + else + -- batch_provider need to be set before + valData = {} + valData.inputs,valData.targets = self.batch_provider:getBatch() + torch.save(tname,valData) + self.batch_provider = nil + end + + local num_batches = valData.inputs:size(1) + local module = self.module + + local err = 0 + local inputs = torch.CudaTensor() + local targets = torch.CudaTensor() + for t=1,num_batches do + xlua.progress(t,num_batches) + + inputs:resize(valData.inputs[t]:size()):copy(valData.inputs[t]) + targets:resize(valData.targets[t]:size()):copy(valData.targets[t]) + + local output = module:forward(inputs) + + err = err + criterion:forward(output,targets) + end + + valData = nil + collectgarbage() + + return err/num_batches +end + +function Tester:test(iteration) + + local dataset = self.dataset + local module = self.module + local feat_provider = self.feat_provider + + local pathfolder = paths.concat(self.cachefolder,'test_iter'..iteration) + paths.mkdir(pathfolder) + + module:evaluate() + dataset:loadROIDB() + + local feats = torch.FloatTensor() + local feats_batched = {} + local feats_cuda = torch.CudaTensor() + + local output = torch.FloatTensor() + + local output_dim = module:get(module:size()) + + local softmax = nn.SoftMax():float() + + local boxes + -- + local aboxes = {} + for i=1,dataset.num_classes do + table.insert(aboxes,{}) + end + + local max_per_set = 5*dataset:size() + local max_per_image = 100 + local thresh = torch.ones(dataset.num_classes):mul(-1.5) + local scored_boxes = torch.FloatTensor() + + local timer = torch.Timer() + local timer2 = torch.Timer() + local timer3 = torch.Timer() + local detec = nnf.ImageDetect(module) + for i=1,dataset:size() do + timer:reset() + io.write(('test: (%s) %5d/%-5d '):format(dataset.dataset_name,i,dataset:size())); + boxes = dataset:getROIBoxes(i):float() + local im = dataset:getImage(i) + local output = detec:detect(im,boxes) + + local add_bg = 0 + if dataset.num_classes ~= output:size(2) then -- if there is no svm + --output = softmax:forward(output) + add_bg = 1 + end + local tt = 0 + local tt2 = timer3:time().real + + timer2:reset() + for j=1,dataset.num_classes do + local scores = output:select(2,j+add_bg) + local idx = torch.range(1,scores:numel()):long() + local idx2 = scores:gt(thresh[j]) + idx = idx[idx2] + scored_boxes:resize(idx:numel(),5) + if scored_boxes:numel() > 0 then + scored_boxes:narrow(2,1,4):index(boxes,1,idx) + scored_boxes:select(2,5):copy(scores[idx2]) + end + local keep = nms(scored_boxes,0.3) + if keep:numel()>0 then + local _,ord = torch.sort(scored_boxes:select(2,5):index(1,keep),true) + ord = ord:narrow(1,1,math.min(ord:numel(),max_per_image)) + keep = keep:index(1,ord) + aboxes[j][i] = scored_boxes:index(1,keep) + else + aboxes[j][i] = torch.FloatTensor() + end + + if i%1000 == 0 then + aboxes[j],thresh[j] = keep_top_k(aboxes[j],max_per_set) + end + + end + + io.write((' prepare feat time: %.3f, forward time: %.3f, select time: %.3fs, total time: %.3fs\n'):format(tt,tt2,timer2:time().real,timer:time().real)); + --collectgarbage() + --mattorch.save(paths.concat(pathfolder,dataset.img_ids[i]..'.mat'),output:double()) + end + + for i = 1,dataset.num_classes do + -- go back through and prune out detections below the found threshold + for j = 1,dataset:size() do + if aboxes[i][j]:numel() > 0 then + local I = aboxes[i][j]:select(2,5):lt(thresh[i]) + local idx 
= torch.range(1,aboxes[i][j]:size(1)):long() + idx = idx[I] + if idx:numel()>0 then + aboxes[i][j] = aboxes[i][j]:index(1,idx) + end + end + end + save_file = paths.concat(pathfolder, dataset.classes[i].. '_boxes_'.. + dataset.dataset_name..self.suffix) + torch.save(save_file, aboxes) + end + + local res = {} + for i=1,dataset.num_classes do + local cls = dataset.classes[i] + res[i] = VOCevaldet(dataset,aboxes[i],cls) + end + res = torch.Tensor(res) + print('Results:') + -- print class names + io.write('|') + for i = 1, dataset.num_classes do + io.write(('%5s|'):format(dataset.classes[i])) + end + io.write('\n|') + -- print class scores + for i = 1, dataset.num_classes do + local l = #dataset.classes[i] < 5 and 5 or #dataset.classes[i] + local l = res[i] == res[i] and l-5 or l-3 + if l > 0 then + io.write(('%.3f%'..l..'s|'):format(res[i],' ')) + else + io.write(('%.3f|'):format(res[i])) + end + end + io.write('\n') + io.write(('mAP: %.4f\n'):format(res:mean(1)[1])) + + -- clean roidb to free memory + dataset.roidb = nil + return res +end diff --git a/nnf.lua b/nnf.lua index 2cf0592..f4610da 100644 --- a/nnf.lua +++ b/nnf.lua @@ -15,8 +15,10 @@ torch.include('nnf','ROIPooling.lua') torch.include('nnf','Trainer.lua') torch.include('nnf','Tester.lua') +torch.include('nnf','Tester_FRCNN.lua') torch.include('nnf','SVMTrainer.lua') torch.include('nnf','ImageTransformer.lua') +torch.include('nnf','ImageDetect.lua') --return nnf diff --git a/test_frcnn.lua b/test_frcnn.lua index 109c7cd..4db50a9 100644 --- a/test_frcnn.lua +++ b/test_frcnn.lua @@ -2,7 +2,7 @@ require 'nnf' require 'inn' require 'cudnn' -cutorch.setDevice(1) +cutorch.setDevice(2) dt = torch.load('pascal_2007_train.t7') if false then @@ -11,7 +11,7 @@ if false then roidbdir='/home/francisco/work/datasets/rcnn/selective_search_data' } else - ds = nnf.DataSetPascal{image_set='train', + ds = nnf.DataSetPascal{image_set='trainval', datadir='datasets/VOCdevkit', roidbdir='data/selective_search_data' } @@ -23,7 +23,7 @@ if false then ds.roidb[i] = torch.IntTensor(10,4):random(1,5) ds.roidb[i][{{},{3,4}}]:add(6) end -elseif true then +elseif false then ds.roidb = dt.roidb end @@ -123,8 +123,8 @@ do prl:add(features) prl:add(nn.Identity()) model:add(prl) - --model:add(nnf.ROIPooling(6,6):setSpatialScale(1/16)) - model:add(inn.ROIPooling(6,6):setSpatialScale(1/16)) + model:add(nnf.ROIPooling(6,6):setSpatialScale(1/16)) + --model:add(inn.ROIPooling(6,6):setSpatialScale(1/16)) model:add(nn.View(-1):setNumInputDims(3)) model:add(classifier) @@ -155,8 +155,8 @@ criterion = nn.CrossEntropyCriterion():cuda() display_iter = 20 ---inputs = {torch.CudaTensor(),torch.FloatTensor()} -inputs = {torch.CudaTensor(),torch.CudaTensor()} +inputs = {torch.CudaTensor(),torch.FloatTensor()} +--inputs = {torch.CudaTensor(),torch.CudaTensor()} target = torch.CudaTensor() function train() @@ -219,6 +219,18 @@ for i=1,num_iter do end if i%100 == 0 then - torch.save(paths.concat('cachedir','frcnn_t1.t7'),savedModel) + torch.save(paths.concat('cachedir','frcnn_t2.t7'),savedModel) end end + +-- test +dsv = nnf.DataSetPascal{image_set='test', + datadir='datasets/VOCdevkit', + roidbdir='data/selective_search_data' + } + + +local fpv = {dataset=dsv} +tester = nnf.Tester_FRCNN(model,fpv) +tester.cachefolder = 'cachedir/frcnn_t2' +tester:test(num_iter) From b810d297b9de71fe8b3810e6945d791a5a2831d5 Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Sun, 23 Aug 2015 17:20:40 +0200 Subject: [PATCH 15/79] Force rois to be in cpu GPU single element access is much slower --- 
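The change below sidesteps a Torch performance trap: every single-element read from a CudaTensor (for example roi[1] inside ROIPooling's per-ROI loop) forces a synchronous device-to-host transfer, so looping over box coordinates on the GPU is dominated by transfer latency rather than arithmetic. A minimal, self-contained sketch of the pattern; names and sizes here are illustrative only, not from this repository:

    -- Hedged sketch: do per-element reads on a CPU copy of the rois.
    require 'torch'

    local function sumCorners(rois)
      local s = 0
      for i = 1, rois:size(1) do
        local roi = rois[i] -- element reads: cheap on a FloatTensor
        s = s + roi[1] + roi[2] + roi[3] + roi[4]
      end
      return s
    end

    local rois = torch.rand(2000, 4):float()
    print(sumCorners(rois))
    -- On a CUDA machine the patch's trick amounts to:
    --   local rois_cpu = torch.FloatTensor(rois_gpu:size()):copy(rois_gpu)
    --   sumCorners(rois_cpu) -- one bulk copy, then fast CPU indexing
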
ROIPooling.lua | 10 ++++++++++ Tester_FRCNN.lua | 1 + 2 files changed, 11 insertions(+) diff --git a/ROIPooling.lua b/ROIPooling.lua index 0345628..af12402 100644 --- a/ROIPooling.lua +++ b/ROIPooling.lua @@ -29,6 +29,13 @@ function ROIPooling:updateOutput(input) rois[{{},4}]:cmin(s[ss]) rois[{{},5}]:cmin(s[ss-1]) + -- element access is faster if not a cuda tensor + if rois:type() == 'torch.CudaTensor' then + self._rois = self._rois or torch.FloatTensor() + self._rois:resize(rois:size()):copy(rois) + rois = self._rois + end + if not self._type then self._type = output:type() end if #self.pooler < num_rois then @@ -50,6 +57,9 @@ end function ROIPooling:updateGradInput(input,gradOutput) local data = input[1] local rois = input[2] + if rois:type() == 'torch.CudaTensor' then + rois = self._rois + end local num_rois = rois:size(1) local s = data:size() local ss = s:size(1) diff --git a/Tester_FRCNN.lua b/Tester_FRCNN.lua index 7e8a139..3a541e0 100644 --- a/Tester_FRCNN.lua +++ b/Tester_FRCNN.lua @@ -101,6 +101,7 @@ function Tester:test(iteration) io.write(('test: (%s) %5d/%-5d '):format(dataset.dataset_name,i,dataset:size())); boxes = dataset:getROIBoxes(i):float() local im = dataset:getImage(i) + timer3:reset() local output = detec:detect(im,boxes) local add_bg = 0 From e32c8d4385f1d404eb59cb53c31baa822495ba6f Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Sun, 30 Aug 2015 17:15:39 +0200 Subject: [PATCH 16/79] Fix wrong batch-size --- BatchProviderROI.lua | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index 9844ee7..1e40bcf 100644 --- a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -53,8 +53,8 @@ function BatchProviderROI:permuteIdx() end function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales,do_flip,im_sizes) - local fg_num_each = self.fg_num_each - local bg_num_each = self.bg_num_each + local fg_num_each = torch.round(self.fg_num_each/self.imgs_per_batch) + local bg_num_each = torch.round(self.bg_num_each/self.imgs_per_batch) local rois = {} local labels = {} @@ -121,7 +121,7 @@ local function getImages(self,img_ids,images,do_flip) if torch.round(im_scale*im_size_max) > self.max_size then im_scale = self.max_size/im_size_max end - local im_s = {im_size[1]*im_scale,im_size[2]*im_scale} + local im_s = {torch.round(im_size[1]*im_scale),torch.round(im_size[2]*im_scale)} table.insert(imgs,image.scale(im,im_s[2],im_s[1])) table.insert(im_sizes,im_s) table.insert(im_scales,im_scale) From 639af95b172e894a48de8dca7dd9b945ff311d70 Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Mon, 31 Aug 2015 23:04:41 +0200 Subject: [PATCH 17/79] Modifications in the training procedure Trying to mimic exactly Caffe's SGD --- test_frcnn.lua | 76 ++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 61 insertions(+), 15 deletions(-) diff --git a/test_frcnn.lua b/test_frcnn.lua index 4db50a9..24bb23b 100644 --- a/test_frcnn.lua +++ b/test_frcnn.lua @@ -1,6 +1,7 @@ require 'nnf' require 'inn' require 'cudnn' +require 'gnuplot' cutorch.setDevice(2) @@ -92,7 +93,6 @@ do for i=1,14 do features:add(base_model:get(i):clone()) end - for i=17,22 do classifier:add(base_model:get(i):clone()) end @@ -108,7 +108,11 @@ do for i=1,14 do features:add(m1:get(i):clone()) end - + features:get(3).padW = 1 + features:get(3).padH = 1 + features:get(7).padW = 1 + features:get(7).padH = 1 + for i=2,7 do classifier:add(m2:get(i):clone()) end @@ -123,8 +127,8 @@ do prl:add(features) prl:add(nn.Identity()) model:add(prl) - 
model:add(nnf.ROIPooling(6,6):setSpatialScale(1/16)) - --model:add(inn.ROIPooling(6,6):setSpatialScale(1/16)) + --model:add(nnf.ROIPooling(6,6):setSpatialScale(1/16)) + model:add(inn.ROIPooling(6,6):setSpatialScale(1/16)) model:add(nn.View(-1):setNumInputDims(3)) model:add(classifier) @@ -134,8 +138,22 @@ print(model) model:cuda() parameters,gradParameters = model:getParameters() -optimState = {learningRate = 1e-3, weightDecay = 0.0005, momentum = 0.9, - learningRateDecay = 0} +parameters2,gradParameters2 = model:parameters() + +lr = {0,0,1,2,1,2,1,2,1,2,1,2,1,2,1,2} +wd = {0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0} + +local function updateGPlrwd(clr) + local clr = clr or 1 + for i,p in pairs(gradParameters2) do + p:add(wd[i]*0.0005,parameters2[i]) + p:mul(lr[i]*clr) + end +end + +optimState = {learningRate = 1,--1e-3, + weightDecay = 0.000, momentum = 0.9, + learningRateDecay = 0, dampening=0} -------------------------------------------------------------------------- -- training @@ -155,10 +173,12 @@ criterion = nn.CrossEntropyCriterion():cuda() display_iter = 20 -inputs = {torch.CudaTensor(),torch.FloatTensor()} ---inputs = {torch.CudaTensor(),torch.CudaTensor()} +--inputs = {torch.CudaTensor(),torch.FloatTensor()} +inputs = {torch.CudaTensor(),torch.CudaTensor()} target = torch.CudaTensor() +learningRate = 1e-3 + function train() local err = 0 for i=1,display_iter do @@ -182,6 +202,10 @@ function train() model:backward(inputs,df_do) + -- mimic different learning rates per layer + -- without the cost of having a huge tensor + updateGPlrwd(learningRate) + if normalize then gradParameters:div(batchSize) f = f/batchSize @@ -196,30 +220,52 @@ function train() err = err + fx[1] end print('Training error: '..err/display_iter) + return err/display_iter end epoch_size = math.ceil(ds:size()/bp.imgs_per_batch) -stepsize = 30000 +stepsize = 30000--30000 print_step = 10 -num_iter = 40000/display_iter--3000 +num_iter = 40000--40000 +num_iter = num_iter/display_iter--3000 confusion_matrix:zero() +train_err = {} +exp_name = 'frcnn_t11' +paths.mkdir(paths.concat('cachedir',exp_name)) +--logger = optim.Logger(paths.concat('cachedir',exp_name,'train_err.log')) +train_acc = {} for i=1,num_iter do - print(('Iteration: %d/%d'):format(i,num_iter)) + if i%(stepsize/display_iter) == 0 then - optimState.learningRate = optimState.learningRate/10 + --optimState.learningRate = optimState.learningRate/10 + learningRate = learningRate/10 end - train() + --print(('Iteration: %d/%d, lr: %.5f'):format(i,num_iter,optimState.learningRate)) + print(('Iteration: %d/%d, lr: %.5f'):format(i,num_iter,learningRate)) + + local t_err = train() + table.insert(train_err,t_err) + if i%print_step == 0 then print(confusion_matrix) + table.insert(train_acc,confusion_matrix.averageUnionValid*100) + gnuplot.epsfigure(paths.concat('cachedir',exp_name,'train_err.eps')) + gnuplot.plot('train',torch.Tensor(train_acc),'-') + gnuplot.xlabel('Iterations (200 batch update)') + gnuplot.ylabel('Training accuracy') + gnuplot.grid('on') + gnuplot.plotflush() + gnuplot.closeall() + confusion_matrix:zero() end if i%100 == 0 then - torch.save(paths.concat('cachedir','frcnn_t2.t7'),savedModel) + torch.save(paths.concat('cachedir',exp_name..'.t7'),savedModel) end end @@ -232,5 +278,5 @@ dsv = nnf.DataSetPascal{image_set='test', local fpv = {dataset=dsv} tester = nnf.Tester_FRCNN(model,fpv) -tester.cachefolder = 'cachedir/frcnn_t2' +tester.cachefolder = 'cachedir/'..exp_name tester:test(num_iter) From da0dc4ad69f1929ff726443bf5642fcf236acaf8 Mon Sep 17 00:00:00 
2001 From: Francisco Massa Date: Sun, 13 Sep 2015 23:41:04 +0200 Subject: [PATCH 18/79] Started refactoring code to unify all frameworks. Doesn't work yet --- BatchProvider.lua | 114 ++++++--------------------------- BatchProviderBase.lua | 146 ++++++++++++++++++++++++++++++++++++++++++ BatchProviderROI.lua | 2 +- Trainer.lua | 39 ++++++----- main.lua | 77 ++-------------------- train.lua | 74 +++++++++++++++++++++ utils.lua | 17 +++++ 7 files changed, 282 insertions(+), 187 deletions(-) create mode 100644 BatchProviderBase.lua create mode 100644 train.lua diff --git a/BatchProvider.lua b/BatchProvider.lua index 977bdc7..e1b817f 100644 --- a/BatchProvider.lua +++ b/BatchProvider.lua @@ -1,32 +1,4 @@ -local BatchProvider = torch.class('nnf.BatchProvider') - -local function createWindowBase(rec,i,j,is_bg) - local label = is_bg == true and 0+1 or rec.label[j]+1 - local window = {i,rec.boxes[j][1],rec.boxes[j][2], - rec.boxes[j][3],rec.boxes[j][4], - label} - return window -end - -local function createWindowAngle(rec,i,j,is_bg) - local label = is_bg == true and 0+1 or rec.label[j]+1 - --local ang = ( is_bg == false and rec.objects[rec.correspondance[j] ] ) and - -- rec.objects[rec.correspondance[j] ].viewpoint.azimuth or 0 - local ang - if is_bg == false and rec.objects[rec.correspondance[j] ] then - if rec.objects[rec.correspondance[j] ].viewpoint.distance == '0' then - ang = rec.objects[rec.correspondance[j] ].viewpoint.azimuth_coarse - else - ang = rec.objects[rec.correspondance[j] ].viewpoint.azimuth - end - else - ang = 0 - end - local window = {i,rec.boxes[j][1],rec.boxes[j][2], - rec.boxes[j][3],rec.boxes[j][4], - label,ang} - return window -end +local BatchProvider = torch.class('nnf.BatchProvider','nnf.BatchProviderBase') function BatchProvider:__init(feat_provider) self.dataset = feat_provider.dataset @@ -35,75 +7,11 @@ function BatchProvider:__init(feat_provider) self.nTimesMoreData = 10 self.iter_per_batch = 500 - self.batch_size = 128 - self.fg_fraction = 0.25 - - self.fg_threshold = 0.5 - self.bg_threshold = {0.0,0.5} - - self.createWindow = createWindowBase--createWindowAngle - self.batch_dim = {256*50} self.target_dim = 1 - self.do_flip = true - - --self:setupData() end - -function BatchProvider:setupData() - local dataset = self.dataset - local bb = {} - local bbT = {} - - for i=0,dataset.num_classes do -- 0 because of background - bb[i] = {} - end - - for i=1,dataset.num_imgs do - bbT[i] = {} - end - - for i = 1,dataset.num_imgs do - if dataset.num_imgs > 10 then - xlua.progress(i,dataset.num_imgs) - end - - local rec = dataset:attachProposals(i) - - for j=1,rec:size() do - local id = rec.label[j] - local is_fg = (rec.overlap[j] >= self.fg_threshold) - local is_bg = (not is_fg) and (rec.overlap[j] >= self.bg_threshold[1] and - rec.overlap[j] < self.bg_threshold[2]) - if is_fg then - local window = self.createWindow(rec,i,j,is_bg) - table.insert(bb[1], window) -- could be id instead of 1 - elseif is_bg then - local window = self.createWindow(rec,i,j,is_bg) - table.insert(bb[0], window) - end - - end - - for j=0,dataset.num_classes do -- 0 because of background - if #bb[j] > 0 then - bbT[i][j] = torch.FloatTensor(bb[j]) - end - end - - bb = {} - for i=0,dataset.num_classes do -- 0 because of background - bb[i] = {} - end - collectgarbage() - end - self.bboxes = bbT - --return bbT -end - - function BatchProvider:permuteIdx() local fg_num_each = self.fg_num_each local bg_num_each = self.bg_num_each @@ -234,7 +142,7 @@ function 
BatchProvider:prepareFeatures(im_idx,bboxes,fg_data,bg_data,fg_label,bg -- return fg_data,bg_data,fg_label,bg_label end -function BatchProvider:getBatch(batches,targets) +function BatchProvider:prepareBatch(batches,targets) local dataset = self.dataset self.fg_num_each = self.fg_fraction * self.batch_size @@ -300,3 +208,21 @@ function BatchProvider:getBatch(batches,targets) end return batches,targets end + +function BatchProvider:getBatch(batches,targets) + self._cur = self._cur or math.huge + -- we have reached the end of our batch pool, need to recompute + if self._cur > self.iter_per_batch then + self._batches,self._targets = self:prepareBatch(self._batches,self._targets) + self._cur = 1 + end + --local batches = batches or torch.FloatTensor() + --local targets = targets or torch.FloatTensor() + + local batches = self._batches[self._cur] + local targets = self._targets[self._cur] + self._cur = self._cur + 1 + + return batches, targets + +end diff --git a/BatchProviderBase.lua b/BatchProviderBase.lua new file mode 100644 index 0000000..e6c4c36 --- /dev/null +++ b/BatchProviderBase.lua @@ -0,0 +1,146 @@ +local function createWindowBase(rec,i,j,is_bg) + local label = is_bg == true and 0+1 or rec.label[j]+1 + local window = {i,rec.boxes[j][1],rec.boxes[j][2], + rec.boxes[j][3],rec.boxes[j][4], + label} + return window +end + +local function createWindowAngle(rec,i,j,is_bg) + local label = is_bg == true and 0+1 or rec.label[j]+1 + --local ang = ( is_bg == false and rec.objects[rec.correspondance[j] ] ) and + -- rec.objects[rec.correspondance[j] ].viewpoint.azimuth or 0 + local ang + if is_bg == false and rec.objects[rec.correspondance[j] ] then + if rec.objects[rec.correspondance[j] ].viewpoint.distance == '0' then + ang = rec.objects[rec.correspondance[j] ].viewpoint.azimuth_coarse + else + ang = rec.objects[rec.correspondance[j] ].viewpoint.azimuth + end + else + ang = 0 + end + local window = {i,rec.boxes[j][1],rec.boxes[j][2], + rec.boxes[j][3],rec.boxes[j][4], + label,ang} + return window +end + + +local argcheck = require 'argcheck' +local initcheck = argcheck{ + --pack=true, + debug=true, + --noordered=true, +-- {name="self", +-- type="nnf.BatchProviderBase" +-- }, +-- {name="dataset", +-- type="nnf.DatasetPascal", +-- help="A dataset class" +-- }, + {name="batch_size", + type="number", + help="batch size"}, + {name="fg_fraction", + type="number", + help="foreground fraction in batch" + }, + {name="fg_threshold", + type="number", + help="foreground threshold" + }, + {name="bg_threshold", + type="number",--"table", + help="background threshold, in the form {LO,HI}" + }, +-- {name="createWindow", +-- type="function", +-- default=createWindowBase, +-- help="" +-- }, + {name="do_flip", + type="boolean", + help="sample batches with random flips" + }, + +} + + +local BatchProviderBase = torch.class('nnf.BatchProviderBase') + +BatchProviderBase.__init = initcheck +--[[ +function BatchProviderBase:__init(dataset) + + self.dataset = dataset + + self.batch_size = 128 + self.fg_fraction = 0.25 + + self.fg_threshold = 0.5 + self.bg_threshold = {0.0,0.5} + + self.createWindow = createWindowBase + + self.do_flip = true + +end +--]] + +function BatchProviderBase:setupData() + local dataset = self.dataset + local bb = {} + local bbT = {} + + for i=0,dataset.num_classes do -- 0 because of background + bb[i] = {} + end + + for i=1,dataset.num_imgs do + bbT[i] = {} + end + + for i = 1,dataset.num_imgs do + if dataset.num_imgs > 10 then + xlua.progress(i,dataset.num_imgs) + end + + local rec = 
dataset:attachProposals(i) + + for j=1,rec:size() do + local id = rec.label[j] + local is_fg = (rec.overlap[j] >= self.fg_threshold) + local is_bg = (not is_fg) and (rec.overlap[j] >= self.bg_threshold[1] and + rec.overlap[j] < self.bg_threshold[2]) + if is_fg then + local window = self.createWindow(rec,i,j,is_bg) + table.insert(bb[1], window) -- could be id instead of 1 + elseif is_bg then + local window = self.createWindow(rec,i,j,is_bg) + table.insert(bb[0], window) + end + + end + + for j=0,dataset.num_classes do -- 0 because of background + if #bb[j] > 0 then + bbT[i][j] = torch.FloatTensor(bb[j]) + end + end + + bb = {} + for i=0,dataset.num_classes do -- 0 because of background + bb[i] = {} + end + collectgarbage() + end + self.bboxes = bbT + --return bbT +end + +function BatchProviderBase:getBatch(input,target) + error("You can't use BatchProviderBase") + return input,target +end + diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index 1e40bcf..7b6ffd5 100644 --- a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -1,4 +1,4 @@ -local BatchProviderROI, parent = torch.class('nnf.BatchProviderROI','nnf.BatchProvider') +local BatchProviderROI, parent = torch.class('nnf.BatchProviderROI','nnf.BatchProviderBase') function BatchProviderROI:__init(dataset) local fp = {dataset=dataset} diff --git a/Trainer.lua b/Trainer.lua index 180b1eb..87f6bfe 100644 --- a/Trainer.lua +++ b/Trainer.lua @@ -1,13 +1,16 @@ require 'nn' require 'optim' require 'xlua' +local utils = paths.dofile('utils.lua') +local recursiveResizeAsCopyTyped = utils.recursiveResizeAsCopyTyped local Trainer = torch.class('nnf.Trainer') -function Trainer:__init(module,criterion) +function Trainer:__init(module,criterion,batch_provider) self.module = module self.criterion = criterion + self.batch_provider = batch_provider self.parameters,self.gradParameters = self.module:getParameters() @@ -22,40 +25,39 @@ function Trainer:__init(module,criterion) end - -function Trainer:train(inputs,targets) - -- only for batches - assert(targets:dim()>2,'Trainer is only for batches') +function Trainer:train() self.module:training() - self._input = self._input or torch.CudaTensor() - self._target = self._target or torch.CudaTensor() local module = self.module + local batch_provider = self.batch_provider local parameters = self.parameters local gradParameters = self.gradParameters local criterion = self.criterion local optimState = self.optimState - local batchSize = inputs:size(2) - local maxIter = inputs:size(1) + --local maxIter = inputs:size(1) if self.confusion then self.confusion:zero() end local err = 0 - self._input:resize(inputs[1]:size()) - self._target:resize(targets[1]:size()) - local input = self._input - local target = self._target - + local input + local target + for t=1,maxIter do xlua.progress(t,maxIter) - input:copy(inputs[t]) - target:copy(targets[t]) + -- get training batch + self.input0,self.target0 = batch_provider(self.input0,self.target0) + + -- copy to ttype + self.input,self.input0 = recursiveResizeAsCopyTyped(self.input,self.input0,ttype) + self.target,self.target0 = recursiveResizeAsCopyTyped(self.target,self.target0,ttype) + input = self.input + target = self.target local feval = function(x) if x ~= parameters then @@ -70,11 +72,6 @@ function Trainer:train(inputs,targets) module:backward(input,df_do) - if self.normalize then - gradParameters:div(batchSize) - f = f/batchSize - end - if self.confusion then self.confusion:batchAdd(outputs,target) end diff --git a/main.lua b/main.lua index 65a4b18..2320090 
100644 --- a/main.lua +++ b/main.lua @@ -35,76 +35,7 @@ paths.dofile('data.lua') -------------------------------------------------------------------------------- -- Prepare training model -------------------------------------------------------------------------------- - -trainer = nnf.Trainer(classifier,criterion) -trainer.optimState.learningRate = opt.lr - -local conf_classes = {} -table.insert(conf_classes,'background') -for i=1,#classes do - table.insert(conf_classes,classes[i]) -end -trainer.confusion = optim.ConfusionMatrix(conf_classes) - -validator = nnf.Tester(classifier,feat_provider_test) -validator.cachefolder = opt.save_base -validator.cachename = 'validation_data.t7' -validator.batch_provider = batch_provider_test - -logger = optim.Logger(paths.concat(opt.save,'log.txt')) -val_err = {} -val_counter = 0 -reduc_counter = 0 - -inputs = torch.FloatTensor() -targets = torch.IntTensor() -for i=1,opt.num_iter do - - print('Iteration: '..i..'/'..opt.num_iter) - inputs,targets = batch_provider:getBatch(inputs,targets) - print('==> Training '..paths.basename(opt.save_base)) - trainer:train(inputs,targets) - print('==> Training Error: '..trainer.fx[i]) - print(trainer.confusion) - - collectgarbage() - - err = validator:validate(criterion) - print('==> Validation Error: '..err) - table.insert(val_err,err) - - logger:add{['train error (iters per batch='..batch_provider.iter_per_batch.. - ')']=trainer.fx[i],['val error']=err, - ['learning rate']=trainer.optimState.learningRate} - - val_counter = val_counter + 1 - - local val_err_t = torch.Tensor(val_err) - local _,lmin = val_err_t:min(1) - if val_counter-lmin[1] >= opt.nsmooth then - print('Reducing learning rate') - trainer.optimState.learningRate = trainer.optimState.learningRate/2 - if opt.nildfdx == true then - trainer.optimState.dfdx= nil - end - val_counter = 0 - val_err = {} - reduc_counter = reduc_counter + 1 - if reduc_counter >= opt.nred then - print('Stopping training at iteration '..i) - break - end - end - - collectgarbage() - collectgarbage() - --sanitize(model) - --torch.save(paths.concat(opt.save, 'model_' .. epoch .. '.t7'), classifier) - --torch.save(paths.concat(opt.save, 'optimState_' .. epoch .. 
'.t7'), trainer.optimState) -end - ---sanitize(classifier) -torch.save(paths.concat(opt.save, 'model.t7'), classifier) +paths.dofile('train.lua') ds_train.roidb = nil collectgarbage() @@ -115,7 +46,11 @@ collectgarbage() -------------------------------------------------------------------------------- print('==> Evaluation') -tester = nnf.Tester(classifier,feat_provider_test) +if opt.algo == 'FRCNN' then + tester = nnf.Tester_FRCNN(model,feat_provider_test) +else + tester = nnf.Tester(classifier,feat_provider_test) +end tester.cachefolder = paths.concat(opt.save,'evaluation',ds_test.dataset_name) diff --git a/train.lua b/train.lua new file mode 100644 index 0000000..ae2891f --- /dev/null +++ b/train.lua @@ -0,0 +1,74 @@ + + +local savedModel = model:clone('weight','bias','running_mean','running_std') + +trainer = nnf.Trainer(classifier,criterion,batch_provider) +trainer.optimState.learningRate = opt.lr + +local conf_classes = {} +table.insert(conf_classes,'background') +for i=1,#classes do + table.insert(conf_classes,classes[i]) +end +trainer.confusion = optim.ConfusionMatrix(conf_classes) + +--[[ +validator = nnf.Tester(classifier,feat_provider_test) +validator.cachefolder = opt.save_base +validator.cachename = 'validation_data.t7' +validator.batch_provider = batch_provider_test +--]] +logger = optim.Logger(paths.concat(opt.save,'log.txt')) +val_err = {} +val_counter = 0 +reduc_counter = 0 + +inputs = torch.FloatTensor() +targets = torch.IntTensor() +for i=1,opt.num_iter do + + print('Iteration: '..i..'/'..opt.num_iter) + inputs,targets = batch_provider:getBatch(inputs,targets) + print('==> Training '..paths.basename(opt.save_base)) + trainer:train(inputs,targets) + print('==> Training Error: '..trainer.fx[i]) + print(trainer.confusion) + + collectgarbage() + + --err = validator:validate(criterion) + --print('==> Validation Error: '..err) + --table.insert(val_err,err) + + logger:add{['train error (iters per batch='..batch_provider.iter_per_batch.. + ')']=trainer.fx[i],['val error']=err, + ['learning rate']=trainer.optimState.learningRate} + + val_counter = val_counter + 1 + + --[[ + local val_err_t = torch.Tensor(val_err) + local _,lmin = val_err_t:min(1) + if val_counter-lmin[1] >= opt.nsmooth then + print('Reducing learning rate') + trainer.optimState.learningRate = trainer.optimState.learningRate/2 + if opt.nildfdx == true then + trainer.optimState.dfdx= nil + end + val_counter = 0 + val_err = {} + reduc_counter = reduc_counter + 1 + if reduc_counter >= opt.nred then + print('Stopping training at iteration '..i) + break + end + end +--]] + collectgarbage() + collectgarbage() + torch.save(paths.concat(opt.save, 'model_' .. epoch .. '.t7'), savedModel) + --torch.save(paths.concat(opt.save, 'optimState_' .. epoch .. '.t7'), trainer.optimState) +end + +torch.save(paths.concat(opt.save, 'model.t7'), savedModel) + diff --git a/utils.lua b/utils.lua index 0255907..6b9f047 100644 --- a/utils.lua +++ b/utils.lua @@ -29,6 +29,22 @@ local function joinTable(input,dim) return output end +local function recursiveResizeAsCopyTyped(t1,t2,type) + if torch.type(t2) == 'table' then + t1 = (torch.type(t1) == 'table') and t1 or {t1} + for key,_ in pairs(t2) do + t1[key], t2[key] = recursiveResizeAsCopyTyped(t1[key], t2[key], type) + end + elseif torch.isTensor(t2) then + local type = type or t2:type() + t1 = torch.isTypeOf(t1,type) and t1 or torch.Tensor():type(type) + t1:resize(t2:size()):copy(t2) + else + error("expecting nested tensors or tables. Got ".. + torch.type(t1).." and "..torch.type(t2).." 
instead") + end + return t1, t2 +end -------------------------------------------------------------------------------- local function keep_top_k(boxes,top_k) @@ -267,6 +283,7 @@ utils.VOCap = VOCap utils.convertCaffeModelToTorch = convertCaffeModelToTorch utils.reshapeLastLinearLayer = reshapeLastLinearLayer utils.sanitize = sanitize +utils.recursiveResizeAsCopyTyped = recursiveResizeAsCopyType return utils From 417e08277e619d7fed996cd8ab2012e336262512 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Mon, 14 Sep 2015 09:43:21 +0200 Subject: [PATCH 19/79] Continue refactoring --- BatchProvider.lua | 95 ++++++++++++++++++++++++++++++++----------- BatchProviderBase.lua | 56 ++++++++++++------------- BatchProviderROI.lua | 57 ++++++++++++++++++++++---- nnf.lua | 1 + 4 files changed, 148 insertions(+), 61 deletions(-) diff --git a/BatchProvider.lua b/BatchProvider.lua index e1b817f..eff7abe 100644 --- a/BatchProvider.lua +++ b/BatchProvider.lua @@ -1,14 +1,76 @@ -local BatchProvider = torch.class('nnf.BatchProvider','nnf.BatchProviderBase') +local BatchProvider,parent = + torch.class('nnf.BatchProvider','nnf.BatchProviderBase') -function BatchProvider:__init(feat_provider) - self.dataset = feat_provider.dataset - self.feat_provider = feat_provider +local argcheck = require 'argcheck' +local initcheck = argcheck{ + pack=true, + noordered=true, + {name="dataset", + type="nnf.DataSetPascal", + help="A dataset class" + }, + {name="nTimesMoreData", + type="number", + opt=true, + help=""}, + {name="iter_per_batch", + type="number", + opt=true, + help=""}, + {name="batch_dim", + type="table", + opt=true, + help=""}, + {name="target_dim", + type="number", + opt=true, + help=""}, + {name="batch_size", + type="number", + opt=true, + help="batch size"}, + {name="fg_fraction", + type="number", + opt=true, + help="foreground fraction in batch" + }, + {name="fg_threshold", + type="number", + opt=true, + help="foreground threshold" + }, + {name="bg_threshold", + type="table", + opt=true, + help="background threshold, in the form {LO,HI}" + }, + {name="createWindow", + type="function", + opt=true, + help="" + }, + {name="do_flip", + type="boolean", + opt=true, + help="sample batches with random flips" + }, +} + +function BatchProvider:__init(...) + parent.__init() self.nTimesMoreData = 10 self.iter_per_batch = 500 self.batch_dim = {256*50} self.target_dim = 1 + + + local opts = initcheck(...) 
+ for k,v in pairs(opts) do self[k] = v end + + --self.dataset = feat_provider.dataset + --self.feat_provider = feat_provider end @@ -113,30 +175,17 @@ function BatchProvider:prepareFeatures(im_idx,bboxes,fg_data,bg_data,fg_label,bg if self.do_flip then flip = torch.random(0,1) == 0 end - --print(bboxes) + for i=1,num_pos do - --local bbox = bboxes[1][{i,{2,5}}] local bbox = {bboxes[1][i][2],bboxes[1][i][3],bboxes[1][i][4],bboxes[1][i][5]} fg_data[i] = self.feat_provider:getFeature(im_idx,bbox,flip) fg_label[i][1] = bboxes[1][i][6] ---[[ if flip then - fg_label[i][2] = flip_angle(bboxes[1][i][7]) - else - fg_label[i][2] = bboxes[1][i][7] - end -]] end for i=1,num_neg do - --local bbox = bboxes[0][{i,{2,5}}] local bbox = {bboxes[0][i][2],bboxes[0][i][3],bboxes[0][i][4],bboxes[0][i][5]} bg_data[i] = self.feat_provider:getFeature(im_idx,bbox,flip) bg_label[i][1] = bboxes[0][i][6] ---[[ if flip then - bg_label[i][2] = flip_angle(bboxes[0][i][7]) - else - bg_label[i][2] = bboxes[0][i][7] - end]] end -- return fg_data,bg_data,fg_label,bg_label @@ -209,20 +258,18 @@ function BatchProvider:prepareBatch(batches,targets) return batches,targets end -function BatchProvider:getBatch(batches,targets) +function BatchProvider:getBatch() self._cur = self._cur or math.huge -- we have reached the end of our batch pool, need to recompute if self._cur > self.iter_per_batch then self._batches,self._targets = self:prepareBatch(self._batches,self._targets) self._cur = 1 end - --local batches = batches or torch.FloatTensor() - --local targets = targets or torch.FloatTensor() - local batches = self._batches[self._cur] - local targets = self._targets[self._cur] + self.batches = self._batches[self._cur] + self.targets = self._targets[self._cur] self._cur = self._cur + 1 - return batches, targets + return self.batches, self.targets end diff --git a/BatchProviderBase.lua b/BatchProviderBase.lua index e6c4c36..79ffebb 100644 --- a/BatchProviderBase.lua +++ b/BatchProviderBase.lua @@ -1,3 +1,5 @@ +local argcheck = require 'argcheck' + local function createWindowBase(rec,i,j,is_bg) local label = is_bg == true and 0+1 or rec.label[j]+1 local window = {i,rec.boxes[j][1],rec.boxes[j][2], @@ -29,64 +31,58 @@ end local argcheck = require 'argcheck' local initcheck = argcheck{ - --pack=true, - debug=true, - --noordered=true, --- {name="self", --- type="nnf.BatchProviderBase" --- }, --- {name="dataset", --- type="nnf.DatasetPascal", --- help="A dataset class" --- }, + pack=true, + noordered=true, + {name="dataset", + type="nnf.DataSetPascal", + help="A dataset class" + }, {name="batch_size", type="number", + default=128, help="batch size"}, {name="fg_fraction", type="number", + default=0.25, help="foreground fraction in batch" }, {name="fg_threshold", type="number", + default=0.5, help="foreground threshold" }, {name="bg_threshold", - type="number",--"table", + type="table", + default={0,0.5}, help="background threshold, in the form {LO,HI}" }, --- {name="createWindow", --- type="function", --- default=createWindowBase, --- help="" --- }, + {name="createWindow", + type="function", + default=createWindowBase, + help="" + }, {name="do_flip", type="boolean", + default=true, help="sample batches with random flips" }, - } local BatchProviderBase = torch.class('nnf.BatchProviderBase') -BatchProviderBase.__init = initcheck ---[[ -function BatchProviderBase:__init(dataset) - - self.dataset = dataset - +function BatchProviderBase:__init(...) + --local opts = initcheck(...) 
+ --for k,v in pairs(opts) do self[k] = v end + self.batch_size = 128 self.fg_fraction = 0.25 - self.fg_threshold = 0.5 - self.bg_threshold = {0.0,0.5} - + self.bg_threshold = {0,0.5} self.createWindow = createWindowBase - self.do_flip = true - end ---]] + function BatchProviderBase:setupData() local dataset = self.dataset @@ -139,7 +135,7 @@ function BatchProviderBase:setupData() --return bbT end -function BatchProviderBase:getBatch(input,target) +function BatchProviderBase:getBatch() error("You can't use BatchProviderBase") return input,target end diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index 7b6ffd5..18c58e6 100644 --- a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -1,5 +1,48 @@ local BatchProviderROI, parent = torch.class('nnf.BatchProviderROI','nnf.BatchProviderBase') +local argcheck = require 'argcheck' +local initcheck = argcheck{ + pack=true, + noordered=true, + {name="dataset", + type="nnf.DataSetPascal", + help="A dataset class" + }, + {name="batch_size", + type="number", + opt=true, + help="batch size"}, + {name="batch_size", + type="number", + opt=true, + help="batch size"}, + {name="fg_fraction", + type="number", + opt=true, + help="foreground fraction in batch" + }, + {name="fg_threshold", + type="number", + opt=true, + help="foreground threshold" + }, + {name="bg_threshold", + type="table", + opt=true, + help="background threshold, in the form {LO,HI}" + }, + {name="createWindow", + type="function", + opt=true, + help="" + }, + {name="do_flip", + type="boolean", + opt=true, + help="sample batches with random flips" + }, +} + function BatchProviderROI:__init(dataset) local fp = {dataset=dataset} parent:__init(fp) @@ -12,8 +55,6 @@ end -- setup is the same function BatchProviderROI:permuteIdx() - --local fg_num_total = self.fg_num_total - --local bg_num_total = self.bg_num_total local total_img = self.dataset:size() local imgs_per_batch = self.imgs_per_batch @@ -137,22 +178,24 @@ local function getImages(self,img_ids,images,do_flip) end -function BatchProviderROI:getBatch(batches,targets) +function BatchProviderROI:getBatch() local dataset = self.dataset self.fg_num_each = self.fg_fraction * self.batch_size self.bg_num_each = self.batch_size - self.fg_num_each local fg_windows,bg_windows,opts = self:permuteIdx() - --local fg_w,bg_w = self:selectBBoxes(fg_windows,bg_windows) - local batches = batches or {torch.FloatTensor(),torch.FloatTensor()} - local targets = targets or torch.FloatTensor() + self.batches = self.batches or {torch.FloatTensor(),torch.FloatTensor()} + self.targets = self.targets or torch.FloatTensor() + + local batches = self.batches + local targets = self.targets local im_scales, im_sizes = getImages(self,opts.img_idx,batches[1],opts.do_flip) local rois,labels = self:selectBBoxes(fg_windows,bg_windows,im_scales,opts.do_flip, im_sizes) batches[2]:resizeAs(rois):copy(rois) targets:resize(labels:size()):copy(labels) - return batches, targets + return self.batches, self.targets end diff --git a/nnf.lua b/nnf.lua index f4610da..543865e 100644 --- a/nnf.lua +++ b/nnf.lua @@ -6,6 +6,7 @@ require 'xlua' nnf = {} torch.include('nnf','DataSetPascal.lua') +torch.include('nnf','BatchProviderBase.lua') torch.include('nnf','BatchProvider.lua') torch.include('nnf','BatchProviderROI.lua') From b5a0223cc362da5196c05e00e08abd132b2aa6d0 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Mon, 14 Sep 2015 15:14:52 +0200 Subject: [PATCH 20/79] Factorized RCNN and FRCNN as feature providers, and modified BatchProviderROI to use FRCNN instead --- 
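The refactoring below settles on one contract for feature providers: getFeature(imgs, bboxes, flip) returns network-ready inputs, and callers no longer care whether crops come from pixel warping (RCNN) or are deferred to ROI pooling (FRCNN). A hedged, self-contained sketch of that contract; ToyProvider and its fixed-size rescale are invented for illustration and are not part of the patch:

    -- Hedged sketch of the unified provider interface.
    require 'torch'
    require 'image'

    local ToyProvider = torch.class('ToyProvider')

    function ToyProvider:__init()
      self.crop_size = 6 -- side of the fixed-size output crop
    end

    -- imgs: 3xHxW tensor (or table of them); bboxes: Nx4 [x1,y1,x2,y2]
    -- flip is accepted for interface parity but ignored in this sketch
    function ToyProvider:getFeature(imgs, bboxes, flip)
      if torch.isTensor(imgs) then imgs, bboxes = {imgs}, {bboxes} end
      local crops = {}
      for i, im in ipairs(imgs) do
        local b = bboxes[i]
        for j = 1, b:size(1) do
          -- crop the box, then rescale to a fixed size; this stands in
          -- for RCNN-style warping or FRCNN-style ROI pooling
          local patch = im[{{}, {b[j][2], b[j][4]}, {b[j][1], b[j][3]}}]
          table.insert(crops,
            image.scale(patch:contiguous(), self.crop_size, self.crop_size))
        end
      end
      return crops
    end

    local p = ToyProvider()
    local feats = p:getFeature(torch.rand(3, 32, 32),
                               torch.Tensor{{1, 1, 16, 16}, {8, 8, 24, 24}})
    print(#feats, feats[1]:size())
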
BatchProvider.lua | 11 ++-- BatchProviderBase.lua | 6 ++- BatchProviderROI.lua | 96 ++++++++------------------------- FRCNN.lua | 120 +++++++++++++++++++++++++++++++++--------- RCNN.lua | 47 +++++++++++++---- SPP.lua | 21 -------- nnf.lua | 1 + utils.lua | 15 ++++++ 8 files changed, 181 insertions(+), 136 deletions(-) diff --git a/BatchProvider.lua b/BatchProvider.lua index eff7abe..d0e4aa4 100644 --- a/BatchProvider.lua +++ b/BatchProvider.lua @@ -1,6 +1,6 @@ local BatchProvider,parent = torch.class('nnf.BatchProvider','nnf.BatchProviderBase') - +--[[ local argcheck = require 'argcheck' local initcheck = argcheck{ pack=true, @@ -55,9 +55,10 @@ local initcheck = argcheck{ help="sample batches with random flips" }, } - +--]] +-- function BatchProvider:__init(...) - parent.__init() + parent:__init() self.nTimesMoreData = 10 self.iter_per_batch = 500 @@ -66,8 +67,8 @@ function BatchProvider:__init(...) self.target_dim = 1 - local opts = initcheck(...) - for k,v in pairs(opts) do self[k] = v end + --local opts = initcheck(...) + --for k,v in pairs(opts) do self[k] = v end --self.dataset = feat_provider.dataset --self.feat_provider = feat_provider diff --git a/BatchProviderBase.lua b/BatchProviderBase.lua index 79ffebb..828a720 100644 --- a/BatchProviderBase.lua +++ b/BatchProviderBase.lua @@ -72,8 +72,6 @@ local initcheck = argcheck{ local BatchProviderBase = torch.class('nnf.BatchProviderBase') function BatchProviderBase:__init(...) - --local opts = initcheck(...) - --for k,v in pairs(opts) do self[k] = v end self.batch_size = 128 self.fg_fraction = 0.25 @@ -81,6 +79,10 @@ function BatchProviderBase:__init(...) self.bg_threshold = {0,0.5} self.createWindow = createWindowBase self.do_flip = true + + local opts = initcheck(...) + for k,v in pairs(opts) do self[k] = v end + end diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index 18c58e6..a54afba 100644 --- a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -1,5 +1,5 @@ local BatchProviderROI, parent = torch.class('nnf.BatchProviderROI','nnf.BatchProviderBase') - +--[[ local argcheck = require 'argcheck' local initcheck = argcheck{ pack=true, @@ -42,14 +42,11 @@ local initcheck = argcheck{ help="sample batches with random flips" }, } - +--]] function BatchProviderROI:__init(dataset) - local fp = {dataset=dataset} - parent:__init(fp) + parent:__init{dataset=dataset} self.imgs_per_batch = 2 - self.scale = 600 - self.max_size = 1000 - self.image_transformer = nnf.ImageTransformer{} + self.feature_provider = nnf.FRCNN{} end -- setup is the same @@ -93,30 +90,22 @@ function BatchProviderROI:permuteIdx() end -function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales,do_flip,im_sizes) +function BatchProviderROI:selectBBoxes(fg_windows,bg_windows) local fg_num_each = torch.round(self.fg_num_each/self.imgs_per_batch) local bg_num_each = torch.round(self.bg_num_each/self.imgs_per_batch) - local rois = {} + local bboxes = {} local labels = {} for im=1,self.imgs_per_batch do - local im_scale = im_scales[im] local window_idx = torch.randperm(#bg_windows[im]) local end_idx = math.min(bg_num_each,#bg_windows[im]) - local flip = do_flip[im] == 1 - local im_size = im_sizes[im] + local bbox = {} for i=1,end_idx do local curr_idx = bg_windows[im][window_idx[i] ][1] local position = bg_windows[im][window_idx[i] ][2] - local dd = self.bboxes[curr_idx][0][position][{{2,5}}]:clone() - dd:add(-1):mul(im_scale):add(1) - if flip then - local tt = dd[1] - dd[1] = im_size[2]-dd[3] +1 - dd[3] = im_size[2]-tt +1 - end - 
table.insert(rois,{im,dd[1],dd[2],dd[3],dd[4]}) - table.insert(labels,self.bboxes[curr_idx][0][position][6]) + local dd = self.bboxes[curr_idx][0][position][{{2,6}}] + table.insert(bbox,{dd[1],dd[2],dd[3],dd[4]}) + table.insert(labels,dd[5]) end window_idx = torch.randperm(#fg_windows[im]) @@ -124,60 +113,16 @@ function BatchProviderROI:selectBBoxes(fg_windows,bg_windows,im_scales,do_flip,i for i=1,end_idx do local curr_idx = fg_windows[im][window_idx[i] ][1] local position = fg_windows[im][window_idx[i] ][2] - local dd = self.bboxes[curr_idx][1][position][{{2,5}}]:clone() - dd:add(-1):mul(im_scale):add(1) - if flip then - local tt = dd[1] - dd[1] = im_size[2]-dd[3] +1 - dd[3] = im_size[2]-tt +1 - end - table.insert(rois,{im,dd[1],dd[2],dd[3],dd[4]}) - table.insert(labels,self.bboxes[curr_idx][1][position][6]) + local dd = self.bboxes[curr_idx][1][position][{{2,6}}] + table.insert(bbox,{dd[1],dd[2],dd[3],dd[4]}) + table.insert(labels,dd[5]) end + table.insert(bboxes,torch.FloatTensor(bbox)) end - rois = torch.FloatTensor(rois) labels = torch.IntTensor(labels) - return rois, labels -end - -local function getImages(self,img_ids,images,do_flip) - local dataset = self.dataset - local num_images = img_ids:size(1) - - local imgs = {} - local im_sizes = {} - local im_scales = {} - - for i=1,num_images do - local im = dataset:getImage(img_ids[i]) - im = self.image_transformer:preprocess(im) - local flip = do_flip[i] == 1 - if flip then - im = image.hflip(im) - end - local im_size = im[1]:size() - local im_size_min = math.min(im_size[1],im_size[2]) - local im_size_max = math.max(im_size[1],im_size[2]) - local im_scale = self.scale/im_size_min - if torch.round(im_scale*im_size_max) > self.max_size then - im_scale = self.max_size/im_size_max - end - local im_s = {torch.round(im_size[1]*im_scale),torch.round(im_size[2]*im_scale)} - table.insert(imgs,image.scale(im,im_s[2],im_s[1])) - table.insert(im_sizes,im_s) - table.insert(im_scales,im_scale) - end - -- create single tensor with all images, padding with zero for different sizes - im_sizes = torch.IntTensor(im_sizes) - local max_shape = im_sizes:max(1)[1] - images:resize(num_images,3,max_shape[1],max_shape[2]):zero() - for i=1,num_images do - images[i][{{},{1,imgs[i]:size(2)},{1,imgs[i]:size(3)}}]:copy(imgs[i]) - end - return im_scales,im_sizes + return bboxes, labels end - function BatchProviderROI:getBatch() local dataset = self.dataset @@ -186,15 +131,18 @@ function BatchProviderROI:getBatch() local fg_windows,bg_windows,opts = self:permuteIdx() - self.batches = self.batches or {torch.FloatTensor(),torch.FloatTensor()} self.targets = self.targets or torch.FloatTensor() local batches = self.batches local targets = self.targets - local im_scales, im_sizes = getImages(self,opts.img_idx,batches[1],opts.do_flip) - local rois,labels = self:selectBBoxes(fg_windows,bg_windows,im_scales,opts.do_flip, im_sizes) - batches[2]:resizeAs(rois):copy(rois) + local imgs = {} + for i=1,opts.img_idx:size(1) do + table.insert(imgs,dataset:getImage(opts.img_idx[i])) + end + local boxes,labels = self:selectBBoxes(fg_windows,bg_windows) + self.batches = self.feature_provider:getFeature(imgs,boxes,opts.do_flip) + targets:resize(labels:size()):copy(labels) return self.batches, self.targets diff --git a/FRCNN.lua b/FRCNN.lua index 6fd8061..02fcb25 100644 --- a/FRCNN.lua +++ b/FRCNN.lua @@ -1,42 +1,112 @@ +local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes local FRCNN = torch.class('nnf.FRCNN') -function FRCNN:__init(dataset) - self.dataset = dataset 
+function FRCNN:__init() + self.image_transformer = nnf.ImageTransformer{} self.scale = {600} - self.max_dim = 1000 + self.max_size = 1000 self.randomscale = true - --self.sz_conv_standard = 13 - self.step_standard = 16 - --self.offset0 = 21 - --self.offset = 6.5 - --self.inputArea = 224^2 - end -function FRCNN:getScale(I) - local min_size = math.min(I[2],I[3]) - local max_size = math.max(I[2],I[3]) - local scale - if max_size <= self.max_dim then - scale = self.scale[1]/min_size +function FRCNN:processImages(output_imgs,input_imgs,do_flip) + local num_images = #input_imgs + + local imgs = {} + local im_sizes = {} + local im_scales = {} + + for i=1,num_images do + local im = input_imgs[i] + im = self.image_transformer:preprocess(im) + local flip = do_flip and (do_flip[i] == 1) or false + if flip then + im = image.hflip(im) + end + local scale = self.scale[math.random(1,#self.scale)] + local im_size = im[1]:size() + local im_size_min = math.min(im_size[1],im_size[2]) + local im_size_max = math.max(im_size[1],im_size[2]) + local im_scale = scale/im_size_min + if torch.round(im_scale*im_size_max) > self.max_size then + im_scale = self.max_size/im_size_max + end + local im_s = {torch.round(im_size[1]*im_scale),torch.round(im_size[2]*im_scale)} + table.insert(imgs,image.scale(im,im_s[2],im_s[1])) + table.insert(im_sizes,im_s) + table.insert(im_scales,im_scale) + end + -- create single tensor with all images, padding with zero for different sizes + im_sizes = torch.IntTensor(im_sizes) + local max_shape = im_sizes:max(1)[1] + output_imgs:resize(num_images,3,max_shape[1],max_shape[2]):zero() + for i=1,num_images do + output_imgs[i][{{},{1,imgs[i]:size(2)},{1,imgs[i]:size(3)}}]:copy(imgs[i]) + end + return im_scales,im_sizes +end + +-- only for single image ATM, not working yet +local function project_im_rois_eval(im_rois,scales) + local levels + local rois = torch.FloatTensor() + if #scales > 1 then + local scales = torch.FloatTensor(scales) + local widths = im_rois[{{},3}] - im_rois[{{},1}] + 1 + local heights = im_rois[{{},4}] - im_rois[{{}, 2}] + 1 + + local areas = widths * heights + local scaled_areas = areas:view(-1,1) * torch.pow(scales:view(1,-1),2) + local diff_areas = torch.abs(scaled_areas - 224 * 224) + levels = select(2, diff_areas:min(2)) else - scale = self.max_dim/max_size + levels = torch.FloatTensor() + rois:resize(im_rois:size(1),5) + rois[{{},1}]:fill(1) + rois[{{},{2,5}}]:copy(im_rois):add(-1):mul(scales[1]):add(1) end - return scale + + return rois end -function FRCNN:projectBBoxes(bboxes,scale) - return (bboxes-1)*scale+1 + +local function project_im_rois(rois,im_rois,scales,do_flip,imgs_size) + local total_bboxes = 0 + local cumul_bboxes = {0} + for i=1,#scales do + total_bboxes = total_bboxes + im_rois[i]:size(1) + table.insert(cumul_bboxes,total_bboxes) + end + rois:resize(total_bboxes,5) + for i=1,#scales do + local idx = {cumul_bboxes[i]+1,cumul_bboxes[i+1]} + rois[{idx,1}]:fill(i) + rois[{idx,{2,5}}]:copy(im_rois[i]):add(-1):mul(scales[i]):add(1) + if do_flip and do_flip[i] == 1 then + flipBoundingBoxes(rois[{idx,{2,5}}],imgs_size[{i,2}]) + end + end + return rois end -function FRCNN:getFeatures(i,flip) - local I = self.dataset:getImage(i) - local bboxes = self.dataset:attachProposals(i) - I = prepareImage(I) - if flip then - +function FRCNN:getFeature(imgs,bboxes,flip) + --local flip = flip==nil and false or flip + + self._feat = self._feat or {torch.FloatTensor(),torch.FloatTensor()} + + if torch.isTensor(imgs) then + imgs = {imgs} + if type(bboxes) == 'table' then + 
bboxes = torch.FloatTensor(bboxes) + bboxes = bboxes:dim() == 1 and {bboxes:view(1,-1)} or {bboxes} + end end + + local im_scales, im_sizes = self:processImages(self._feat[1],imgs,flip) + project_im_rois(self._feat[2],bboxes,im_scales,flip,im_sizes) + + return self._feat end + diff --git a/RCNN.lua b/RCNN.lua index 03651d3..4b6d4e9 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -1,7 +1,10 @@ +local argcheck = require 'argcheck' +local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes + local RCNN = torch.class('nnf.RCNN') function RCNN:__init(dataset) - self.dataset = dataset + --self.dataset = dataset self.image_transformer = nnf.ImageTransformer{ mean_pix={123.68/255,116.779/255,103.939/255}} @@ -12,9 +15,10 @@ function RCNN:__init(dataset) end -function RCNN:getCrop(im_idx,bbox,flip) +function RCNN:getCrop(output,I,bbox) -- suppose I is in BGR, as image_mean -- [x1 y1 x2 y2] order + --[[ local flip = flip==nil and false or flip if self.curr_im_idx ~= im_idx or self.curr_doflip ~= flip then @@ -35,7 +39,8 @@ function RCNN:getCrop(im_idx,bbox,flip) bbox[1] = I:size(3)-bbox[3]+1 bbox[3] = I:size(3)-tt +1 end - + --]] + -- local crop_size = self.crop_size local image_mean = self.image_mean local padding = self.padding @@ -46,8 +51,6 @@ function RCNN:getCrop(im_idx,bbox,flip) local crop_width = crop_size; local crop_height = crop_size; - --local bbox = {bbox[2],bbox[1],bbox[4],bbox[3]} - ------ if padding > 0 or use_square then local scale = crop_size/(crop_size - padding*2) @@ -107,12 +110,11 @@ function RCNN:getCrop(im_idx,bbox,flip) {pad_w+1,pad_w+crop_width}}] end - --patch = torch.zeros(3,crop_size,crop_size):typeAs(I) - patch = torch.zeros(3,crop_size,crop_size):float() + --patch = torch.FloatTensor(3,crop_size,crop_size):zero() - patch[{{},{pad_h+1,pad_h+crop_height}, {pad_w+1,pad_w+crop_width}}] = tmp + output[{{},{pad_h+1,pad_h+crop_height}, {pad_w+1,pad_w+crop_width}}] = tmp - return patch + return output end @@ -124,4 +126,31 @@ function RCNN:getFeature(im_idx,bbox,flip) return crop_feat end +function RCNN:getFeature(im,bbox,flip) + local flip = flip==nil and false or flip + + if type(im) == 'number' then + + end + if type(bbox) == 'table' then + bbox = torch.FloatTensor(bbox) + end + + im = self.image_transformer:preprocess(im) + bbox = bbox:dim() == 1 and bbox:view(1,-1) or bbox + local num_boxes = bbox:size(1) + + if flip then + im = image.hflip(im) + flipBoundingBoxes(bbox,im:size(3)) + end + + self._feat = self._feat or torch.FloatTensor() + self._feat:resize(num_boxes,3,self.crop_size,self.crop_size):zero() + for i=1,num_boxes do + self:getCrop(self._feat[i],im,bbox[i]) + end + + return self._feat +end diff --git a/SPP.lua b/SPP.lua index cfd67a1..780e880 100644 --- a/SPP.lua +++ b/SPP.lua @@ -62,26 +62,6 @@ function SPP:getFeature(im_idx,bbox,flip) return feat end - -local function cleaningForward(input,model) - local currentOutput = model.modules[1]:updateOutput(input) - for i=2,#model.modules do - collectgarbage() - collectgarbage() - currentOutput = model.modules[i]:updateOutput(currentOutput) - model.modules[i-1].output:resize() - model.modules[i-1].gradInput:resize() - if model.modules[i-1].gradWeight then - model.modules[i-1].gradWeight:resize() - end - if model.modules[i-1].gradBias then - model.modules[i-1].gradBias:resize() - end - end - model.output = currentOutput - return currentOutput -end - function SPP:getConv5(im_idx,flip) local scales = self.scales local flip = flip or false @@ -129,7 +109,6 @@ function SPP:getConv5(im_idx,flip) local Ir = 
image.scale(I,sc,sr):type(mtype) local f = self.model:forward(Ir) - --local f = cleaningForward(Ir,self.model) feats.rsp[i] = torch.FloatTensor(f:size()):copy(f) end

diff --git a/nnf.lua b/nnf.lua index 543865e..0110bf4 100644 --- a/nnf.lua +++ b/nnf.lua @@ -12,6 +12,7 @@ torch.include('nnf','BatchProviderROI.lua') --torch.include('nnf','SPP.lua') torch.include('nnf','RCNN.lua') +torch.include('nnf','FRCNN.lua') torch.include('nnf','ROIPooling.lua') torch.include('nnf','Trainer.lua')

diff --git a/utils.lua b/utils.lua index 6b9f047..c158e86 100644 --- a/utils.lua +++ b/utils.lua @@ -45,6 +45,20 @@ local function recursiveResizeAsCopyTyped(t1,t2,type) end return t1, t2 end + +-- modify bbox input +local function flipBoundingBoxes(bbox, im_width) + if bbox:dim() == 1 then + local tt = bbox[1] + bbox[1] = im_width-bbox[3]+1 + bbox[3] = im_width-tt +1 + else + local tt = bbox[{{},1}]:clone() + bbox[{{},1}]:fill(im_width+1):add(-1,bbox[{{},3}]) + bbox[{{},3}]:fill(im_width+1):add(-1,tt) + end +end + -------------------------------------------------------------------------------- local function keep_top_k(boxes,top_k) @@ -284,6 +298,7 @@ utils.convertCaffeModelToTorch = convertCaffeModelToTorch utils.reshapeLastLinearLayer = reshapeLastLinearLayer utils.sanitize = sanitize utils.recursiveResizeAsCopyTyped = recursiveResizeAsCopyTyped +utils.flipBoundingBoxes = flipBoundingBoxes

return utils

From 0adcd6757099ae8a5f50733b9b2dd902ce1d5ad7 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Mon, 14 Sep 2015 22:22:05 +0200 Subject: [PATCH 21/79] Start cleaning data.lua and add a config file with parameters of the models

--- config.lua | 101 ++++++++++++++++++++++++++++++++++++ data.lua | 40 +------------- spp_compute_conv5_cache.lua | 40 ++++++++++++++ 3 files changed, 142 insertions(+), 39 deletions(-) create mode 100644 config.lua create mode 100644 spp_compute_conv5_cache.lua

diff --git a/config.lua b/config.lua new file mode 100644 index 0000000..343e224 --- /dev/null +++ b/config.lua @@ -0,0 +1,101 @@ + +local configs = {} + +local image_transformer_params = { + mean_pix={102.9801,115.9465,122.7717}, + raw_scale = 255, + swap = {3,2,1} +} + +configs.image_transformer_params = image_transformer_params +configs.algo = {} + +-------------------------------------------------------------------------------- +-- RCNN +-------------------------------------------------------------------------------- + +local fp_params = { + crop_size = 227, + padding = 16, + use_square = false, +} +local bp_params = { + iter_per_batch = 100, + nTimesMoreData = 10, + batch_size = opt.batch_size, + fg_fraction = opt.fg_frac, + fg_threshold = 0.5, + bg_threshold = {0.0,0.5}, + do_flip = true, + batch_dim = {3,fp_params.crop_size,fp_params.crop_size}, +} + +local RCNN = { + fp_params=fp_params, + bp_params=bp_params +} + +configs.algo.RCNN = RCNN + +-------------------------------------------------------------------------------- +-- SPP +-------------------------------------------------------------------------------- + +local fp_params = { + scales = {480,576,688,874,1200}, + randomscale = true, + sz_conv_standard = 13, + step_standard = 16, + offset0 = 21, + offset = 6.5, + inputArea = 224^2, +} +local num_chns = 256 +local pooling_scales = {{1,1},{2,2},{3,3},{6,6}} +local pyr = torch.Tensor(pooling_scales):t() +local pooled_size = pyr[1]:dot(pyr[2]) +local feat_dim = {num_chns*pooled_size} +local bp_params = { + iter_per_batch = 500, + nTimesMoreData = 10, + batch_size = opt.batch_size, + fg_fraction = opt.fg_frac, +
fg_threshold = 0.5, + bg_threshold = {0.1,0.5}, + do_flip = true, + batch_dim = feat_dim, +} + +local SPP = { + fp_params=fp_params, + bp_params=bp_params +} + +configs.algo.SPP = SPP + +-------------------------------------------------------------------------------- +-- Fast-RCNN +-------------------------------------------------------------------------------- + +local fp_params = { + scale = {600}, + max_size = 1000 +} +local bp_params = { + imgs_per_batch = 2, + batch_size = opt.batch_size, + fg_fraction = opt.fg_frac, + fg_threshold = 0.5, + bg_threshold = {0.0,0.5}, + do_flip = true, +} + +local FRCNN = { + fp_params=fp_params, + bp_params=bp_params +} + +configs.algo.FRCNN = FRCNN + + +return configs

diff --git a/data.lua b/data.lua index f1cb9d5..ca8676b 100644 --- a/data.lua +++ b/data.lua @@ -101,45 +101,7 @@ else feat_provider_test.model = features end

--------------------------------------------------------------------------------- -- Compute conv5 feature cache (for SPP) --------------------------------------------------------------------------------- -if opt.algo == 'SPP' then - print('Preparing conv5 features for '..ds_train.dataset_name..' ' - ..ds_train.image_set) - local feat_cachedir = feat_provider.cachedir - for i=1,ds_train:size() do - xlua.progress(i,ds_train:size()) - local im_name = ds_train.img_ids[i] - local cachefile = paths.concat(feat_cachedir,im_name) - if not paths.filep(cachefile..'.h5') then - local f = feat_provider:getConv5(i) - end - if not paths.filep(cachefile..'_flip.h5') then - local f = feat_provider:getConv5(i,true) - end - if i%50 == 0 then - collectgarbage() - collectgarbage() - end - end - - print('Preparing conv5 features for '..ds_test.dataset_name..' ' - ..ds_test.image_set) - local feat_cachedir = feat_provider_test.cachedir - for i=1,ds_test:size() do - xlua.progress(i,ds_test:size()) - local im_name = ds_test.img_ids[i] - local cachefile = paths.concat(feat_cachedir,im_name) - if not paths.filep(cachefile..'.h5') then - local f = feat_provider_test:getConv5(i) - end - if i%50 == 0 then - collectgarbage() - collectgarbage() - end - end -end +-- compute feature cache

features = nil model = nil

diff --git a/spp_compute_conv5_cache.lua b/spp_compute_conv5_cache.lua new file mode 100644 index 0000000..0754acf --- /dev/null +++ b/spp_compute_conv5_cache.lua @@ -0,0 +1,40 @@ +-------------------------------------------------------------------------------- +-- Compute conv5 feature cache (for SPP) +-------------------------------------------------------------------------------- +if opt.algo == 'SPP' then + print('Preparing conv5 features for '..ds_train.dataset_name..' ' + ..ds_train.image_set) + local feat_cachedir = feat_provider.cachedir + for i=1,ds_train:size() do + xlua.progress(i,ds_train:size()) + local im_name = ds_train.img_ids[i] + local cachefile = paths.concat(feat_cachedir,im_name) + if not paths.filep(cachefile..'.h5') then + local f = feat_provider:getConv5(i) + end + if not paths.filep(cachefile..'_flip.h5') then + local f = feat_provider:getConv5(i,true) + end + if i%50 == 0 then + collectgarbage() + collectgarbage() + end + end + + print('Preparing conv5 features for '..ds_test.dataset_name..' 
' + ..ds_test.image_set) + local feat_cachedir = feat_provider_test.cachedir + for i=1,ds_test:size() do + xlua.progress(i,ds_test:size()) + local im_name = ds_test.img_ids[i] + local cachefile = paths.concat(feat_cachedir,im_name) + if not paths.filep(cachefile..'.h5') then + local f = feat_provider_test:getConv5(i) + end + if i%50 == 0 then + collectgarbage() + collectgarbage() + end + end +end + From 086da4d4e0a63ab8b40f12bbe9e39cd659687d4c Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Thu, 17 Sep 2015 10:29:44 +0200 Subject: [PATCH 22/79] Add batched cropping in SPP. Still needs some clean up --- SPP.lua | 155 +++++++++++++++++++++++++++++++++++++++++++++++++---- config.lua | 9 ++-- data.lua | 69 +++++++----------------- 3 files changed, 171 insertions(+), 62 deletions(-) diff --git a/SPP.lua b/SPP.lua index 780e880..a482464 100644 --- a/SPP.lua +++ b/SPP.lua @@ -1,4 +1,5 @@ local hdf5 = require 'hdf5' +local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes local SPP = torch.class('nnf.SPP') @@ -37,17 +38,26 @@ function SPP:getCrop(im_idx,bbox,flip) self.curr_doflip = flip end - local bbox = bbox if flip then - local tt = bbox[1] - bbox[1] = self.curr_im_feats.imSize[3]-bbox[3]+1 - bbox[3] = self.curr_im_feats.imSize[3]-tt +1 + flipBoundingBoxes(bbox,self.curr_im_feats.imSize[3]) end - local bestScale,bestBbox = self:getBestSPPScale(bbox,self.curr_im_feats.imSize,self.curr_im_feats.scales) - local box_norm = self:getResposeBoxes(bestBbox) + --local bestScale,bestBbox = self:getBestSPPScale(bbox,self.curr_im_feats.imSize,self.curr_im_feats.scales) + --local box_norm = self:getResposeBoxes(bestBbox) + + --local crop_feat = self:getCroppedFeat(self.curr_im_feats.rsp[bestScale],box_norm) + + local feat = self.curr_im_feats + local bestScale,bestbboxes,bboxes_norm,projected_bb = + self:projectBoxes(feat, bboxes, feat.scales) + + local crop_feat = {} + for i=1,bbox:size(1) do + local bbox_ = projected_bb[i] + local patch = feat.rsp[bestScale[i]][{{},{bbox_[2],bbox_[4]},{bbox_[1],bbox_[3]}}] + table.insert(crop_feat,patch) + end - local crop_feat = self:getCroppedFeat(self.curr_im_feats.rsp[bestScale],box_norm) return crop_feat end @@ -57,11 +67,20 @@ function SPP:getFeature(im_idx,bbox,flip) local crop_feat = self:getCrop(im_idx,bbox,flip) - local feat = self.spp_pooler:forward(crop_feat) + --local feat = self.spp_pooler:forward(crop_feat) + local feat = torch.FloatTensor(#crop_feat,feat_size) + for i=1,#crop_feat do + feat[i] = self.spp_pooler:forward(crop_feat[i]) + end return feat end +-- SPP is meant to keep a cache of the conv5 features +-- for fast training. In this case, we suppose that +-- we provide the image index in the dataset. +-- We can also use an image as input, in which case it +-- won't save a conv5 cache. 
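+-- A rough usage sketch (variable names here are illustrative only):
+--   local f  = spp_fp:getConv5(42)       -- conv5 maps for image 42, written to the cache
+--   local ff = spp_fp:getConv5(42,true)  -- flipped version, cached with a '_flip' suffix
+-- and, when no dataset is attached:
+--   local ft = spp_fp:getConv5(img_tensor) -- computed on the fly, nothing is cached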
function SPP:getConv5(im_idx,flip) local scales = self.scales local flip = flip or false @@ -73,6 +92,10 @@ function SPP:getConv5(im_idx,flip) if not cachedir then cachedir = '' end + + if not self.dataset then + self.use_cache = false + end local cachefile = paths.concat(self.cachedir,self.dataset.img_ids[im_idx]) @@ -90,7 +113,12 @@ function SPP:getConv5(im_idx,flip) feats.rsp[tostring(i)] = nil end else - local I = self.dataset:getImage(im_idx):float() + local I + if type(im_idx) == 'number' and self.dataset then + I = self.dataset:getImage(im_idx):float() + elseif torch.isTensor(im_idx) then + I = im_idx + end I = self.image_transformer:preprocess(I) if flip then I = image.hflip(I) @@ -232,6 +260,115 @@ function SPP:getCroppedFeat(feat,bbox) end + + +local function unique(bboxes) + local idx = {} + local is_unique = torch.ones(bboxes:size(1)) + for i=1,bboxes:size(1) do + local b = bboxes[i] + local n = b[1]..'_'..b[2]..'_'..b[3]..'_'..b[4]..'_'..b[5] + if idx[n] then + is_unique[i] = 0 + else + idx[n] = i + end + end + return is_unique +end + +-- given a table with the conv5 features at different scales and bboxes in +-- the original image, project the bboxes in the conv5 space +function SPP:projectBoxes(feat, bboxes, scales) + -- bboxes is a nx4 Tensor with candidate bounding boxes + -- in [x1, y1, x2, y2] format + local imSize = feat.imSize + + local scales = scales or self.scales + local min_dim = math.min(imSize[2],imSize[3]) + + local sz_conv_standard = self.sz_conv_standard + local step_standard = self.step_standard + + local nboxes = bboxes:size(1) + + -- get best SPP scale + local bestScale = torch.FloatTensor(nboxes) + + if self.randomscale then + bestScale:random(1,#scales) + else + local bboxArea = boxes.new():resize(nboxes):zero() + bboxArea:map2(bboxes[{{},3}],bboxes[{{},1}],function(xx,xx2,xx1) return xx2-xx1+1 end) + bboxArea:map2(bboxes[{{},4}],bboxes[{{},2}],function(xx,xx2,xx1) return xx*(xx2-xx1+1) end) + + local expected_scale = bboxArea:pow(-0.5):mul(sz_conv_standard*step_standard*min_dim) + expected_scale:round() + + local nbboxDiffArea = torch.FloatTensor(#scales,nboxes) + + for i=1,#scales do + nbboxDiffArea[i]:copy(expected_scale):add(-scales[i]):abs() + end + + bestScale = select(2,nbboxDiffArea:min(1))[1] + end + + local mul_factor = torch.FloatTensor(nboxes,1):copy(bestScale) + local idx = 0 + mul_factor:apply(function(x) + idx = idx + 1 + return (scales[x]-1)/(min_dim-1) + end) + + local bestbboxes = torch.FloatTensor(nboxes,4):copy(bboxes) + bestbboxes:add(-1):cmul(mul_factor:expand(nboxes,4)):add(1) + + -- response boxes + + local offset0 = self.offset0 + local offset = self.offset + + local bboxes_norm = bestbboxes:clone() + bboxes_norm[{{},{1,2}}]:add(-offset0 + offset):div(step_standard):add( 0.5) + bboxes_norm[{{},{1,2}}]:floor():add(1) + bboxes_norm[{{},{3,4}}]:add(-offset0 - offset):div(step_standard):add(-0.5) + bboxes_norm[{{},{3,4}}]:ceil():add(1) + + local x0gtx1 = bboxes_norm[{{},1}]:gt(bboxes_norm[{{},3}]) + local y0gty1 = bboxes_norm[{{},2}]:gt(bboxes_norm[{{},4}]) + + bboxes_norm[{{},1}][x0gtx1]:add(bboxes_norm[{{},3}][x0gtx1]):div(2) + bboxes_norm[{{},3}][x0gtx1]:copy(bboxes_norm[{{},1}][x0gtx1]) + + bboxes_norm[{{},2}][y0gty1]:add(bboxes_norm[{{},4}][y0gty1]):div(2) + bboxes_norm[{{},4}][y0gty1]:copy(bboxes_norm[{{},2}][y0gty1]) + + -- remove repeated projections + if self.dedup then + local is_unique = unique(torch.cat(bboxes_norm,bestScale:view(-1,1),2)) + local lin = torch.range(1,is_unique:size(1)):long() -- can also use cumsum 
instead + bboxes_norm = bboxes_norm:index(1,lin[is_unique]) + end + -- clamp on boundaries + + local projected_bb = bboxes_norm:clone() + + for i=1,#scales do + local this_scale = bestScale:eq(i) + if this_scale:numel() > 0 then + projected_bb[{{},2}][this_scale] = projected_bb[{{},2}][this_scale]:clamp(1,feat.rsp[i]:size(2)) + projected_bb[{{},4}][this_scale] = projected_bb[{{},4}][this_scale]:clamp(1,feat.rsp[i]:size(2)) + projected_bb[{{},1}][this_scale] = projected_bb[{{},1}][this_scale]:clamp(1,feat.rsp[i]:size(3)) + projected_bb[{{},3}][this_scale] = projected_bb[{{},3}][this_scale]:clamp(1,feat.rsp[i]:size(3)) + end + end + + return bestScale,bestbboxes,bboxes_norm,projected_bb +end + + + function SPP:type(t_type) self._type = t_type --self.spp_pooler = self.spp_pooler:type(t_type) diff --git a/config.lua b/config.lua index 343e224..3456c39 100644 --- a/config.lua +++ b/config.lua @@ -32,7 +32,8 @@ local bp_params = { local RCNN = { fp_params=fp_params, - bp_params=bp_params + bp_params=bp_params, + bp = nnf.BatchProvider } configs.algo.RCNN = RCNN @@ -68,7 +69,8 @@ local bp_params = { local SPP = { fp_params=fp_params, - bp_params=bp_params + bp_params=bp_params, + bp = nnf.BatchProvider } configs.algo.SPP = SPP @@ -92,7 +94,8 @@ local bp_params = { local FRCNN = { fp_params=fp_params, - bp_params=bp_params + bp_params=bp_params, + bp = nnf.BatchProviderROI } configs.algo.FRCNN = FRCNN diff --git a/data.lua b/data.lua index ca8676b..cb168e1 100644 --- a/data.lua +++ b/data.lua @@ -8,7 +8,7 @@ testCache = paths.concat(opt.save_base,'testCache.t7') local pooler local feat_dim - +--[[ if opt.algo == 'SPP' then local conv_list = features:findModules(opt.backend..'.SpatialConvolution') local num_chns = conv_list[#conv_list].nOutputPlane @@ -19,9 +19,16 @@ if opt.algo == 'SPP' then elseif opt.algo == 'RCNN' then feat_dim = {3,227,227} end +--]] image_transformer = nnf.ImageTransformer{mean_pix=image_mean} + +local FP = nnf[opt.algo] +local fp_params = config.algo[opt.algo].fp_params +local bp_params = config.algo[opt.algo].bp_params +local BP = config.algo[opt.algo].bp + if paths.filep(trainCache) then print('Loading train metadata from cache') batch_provider = torch.load(trainCache) @@ -32,32 +39,11 @@ else ds_train = nnf.DataSetPascal{image_set='trainval',classes=classes,year=opt.year, datadir=opt.datadir,roidbdir=opt.roidbdir} - if opt.algo == 'SPP' then - feat_provider = nnf.SPP(ds_train)-- remove features here to reduce cache size - feat_provider.cachedir = paths.concat(opt.cache,'features',opt.netType) - feat_provider.randomscale = true - feat_provider.scales = {600} - feat_provider.spp_pooler = pooler:clone() - feat_provider.image_transformer = image_transformer - elseif opt.algo == 'RCNN' then - feat_provider = nnf.RCNN(ds_train) - feat_provider.crop_size = feat_dim[2] - feat_provider.image_transformer = image_transformer - else - error(("Detection framework '%s' not available"):format(opt.algo)) - end - - print('==> Preparing BatchProvider for training') - batch_provider = nnf.BatchProvider(feat_provider) - batch_provider.iter_per_batch = opt.ipb - batch_provider.nTimesMoreData = opt.ntmd - batch_provider.batch_size = opt.batch_size - batch_provider.fg_fraction = opt.fg_frac - batch_provider.bg_threshold = {0.0,0.5} - batch_provider.do_flip = true - batch_provider.batch_dim = feat_dim + + feat_provider = FP(ds_train) + batch_provider = BP(bp_params) batch_provider:setupData() - + torch.save(trainCache,batch_provider) feat_provider.model = features end @@ -71,30 +57,13 @@ if 
paths.filep(testCache) then else ds_test = nnf.DataSetPascal{image_set='test',classes=classes,year=opt.year, datadir=opt.datadir,roidbdir=opt.roidbdir} - if opt.algo == 'SPP' then - feat_provider_test = nnf.SPP(ds_test) - feat_provider_test.randomscale = false - feat_provider_test.cachedir = paths.concat(opt.cache,'features',opt.netType) - feat_provider_test.scales = {600} - feat_provider_test.spp_pooler = pooler:clone() - feat_provider_test.image_transformer = image_transformer - elseif opt.algo == 'RCNN' then - feat_provider_test = nnf.RCNN(ds_test) - feat_provider_test.crop_size = feat_dim[2] - feat_provider_test.image_transformer = image_transformer - else - error(("Detection framework '%s' not available"):format(opt.algo)) - end - - print('==> Preparing BatchProvider for validation') - batch_provider_test = nnf.BatchProvider(feat_provider_test) - batch_provider_test.iter_per_batch = 500--opt.ipb - batch_provider_test.nTimesMoreData = 10--opt.ntmd - batch_provider_test.batch_size = opt.batch_size - batch_provider_test.fg_fraction = opt.fg_frac - batch_provider_test.bg_threshold = {0.0,0.5} - batch_provider_test.do_flip = false - batch_provider_test.batch_dim = feat_dim + + + feat_provider_test = FP(ds_test) + -- disable flip ? + bp_params.do_flip = false + batch_provider_test = BP(bp_params) + batch_provider_test:setupData() torch.save(testCache,batch_provider_test) From e36f6be5fe2cf3e334a0ed5c558cf1a651ce56a7 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Fri, 18 Sep 2015 09:12:25 +0200 Subject: [PATCH 23/79] The basics of RCNN and Fast-RCNN seems to be working after refactoring. Need to check SPP --- BatchProvider.lua | 33 +++++++++++++++++++++------------ FRCNN.lua | 5 +++++ RCNN.lua | 10 +++++++--- SPP.lua | 27 ++++++++++++++++++++------- config.lua | 40 +++++++++++++++++++++++----------------- 5 files changed, 76 insertions(+), 39 deletions(-) diff --git a/BatchProvider.lua b/BatchProvider.lua index d0e4aa4..f8a2c29 100644 --- a/BatchProvider.lua +++ b/BatchProvider.lua @@ -58,14 +58,14 @@ local initcheck = argcheck{ --]] -- function BatchProvider:__init(...) - parent:__init() + parent:__init(...) self.nTimesMoreData = 10 self.iter_per_batch = 500 - self.batch_dim = {256*50} + self.feat_provider = nnf.RCNN(self.dataset) + self.batch_dim = self.feat_provider.output_size--{256*50} self.target_dim = 1 - --local opts = initcheck(...) 
--for k,v in pairs(opts) do self[k] = v end @@ -161,13 +161,13 @@ local function flip_angle(x) end -- depends on the model -function BatchProvider:prepareFeatures(im_idx,bboxes,fg_data,bg_data,fg_label,bg_label) +function BatchProvider:prepareFeatures(im_idx,bboxes,fg_label,bg_label) local num_pos = bboxes[1] and #bboxes[1] or 0 local num_neg = bboxes[0] and #bboxes[0] or 0 - fg_data:resize(num_pos,unpack(self.batch_dim)) - bg_data:resize(num_neg,unpack(self.batch_dim)) + --fg_data:resize(num_pos,unpack(self.batch_dim)) + --bg_data:resize(num_neg,unpack(self.batch_dim)) fg_label:resize(num_pos,self.target_dim) bg_label:resize(num_neg,self.target_dim) @@ -177,18 +177,27 @@ function BatchProvider:prepareFeatures(im_idx,bboxes,fg_data,bg_data,fg_label,bg flip = torch.random(0,1) == 0 end + local s_boxes = {} for i=1,num_pos do local bbox = {bboxes[1][i][2],bboxes[1][i][3],bboxes[1][i][4],bboxes[1][i][5]} - fg_data[i] = self.feat_provider:getFeature(im_idx,bbox,flip) + table.insert(s_boxes,bbox) + --fg_data[i] = self.feat_provider:getFeature(im_idx,bbox,flip) fg_label[i][1] = bboxes[1][i][6] end for i=1,num_neg do local bbox = {bboxes[0][i][2],bboxes[0][i][3],bboxes[0][i][4],bboxes[0][i][5]} - bg_data[i] = self.feat_provider:getFeature(im_idx,bbox,flip) + table.insert(s_boxes,bbox) + --bg_data[i] = self.feat_provider:getFeature(im_idx,bbox,flip) bg_label[i][1] = bboxes[0][i][6] end - + + -- compute the features + local feats = self.feat_provider:getFeature(im_idx,s_boxes,flip) + local fg_data = feats:narrow(1,1,num_pos) + local bg_data = feats:narrow(1,num_pos+1,num_neg) + + return fg_data, bg_data -- return fg_data,bg_data,fg_label,bg_label end @@ -215,8 +224,8 @@ function BatchProvider:prepareBatch(batches,targets) local bg_counter = 0 local fg_data,bg_data,fg_label,bg_label - fg_data = torch.FloatTensor() - bg_data = torch.FloatTensor() + --fg_data = torch.FloatTensor() + --bg_data = torch.FloatTensor() fg_label = torch.IntTensor() bg_label = torch.IntTensor() @@ -236,7 +245,7 @@ function BatchProvider:prepareBatch(batches,targets) bboxes[0] = bg_w[curr_idx] bboxes[1] = fg_w[curr_idx] - self:prepareFeatures(curr_idx,bboxes,fg_data,bg_data,fg_label,bg_label) + fg_data,bg_data = self:prepareFeatures(curr_idx,bboxes,fg_label,bg_label) for j=1,nbg do bg_counter = bg_counter + 1 diff --git a/FRCNN.lua b/FRCNN.lua index 02fcb25..277d1c6 100644 --- a/FRCNN.lua +++ b/FRCNN.lua @@ -102,6 +102,11 @@ function FRCNN:getFeature(imgs,bboxes,flip) bboxes = torch.FloatTensor(bboxes) bboxes = bboxes:dim() == 1 and {bboxes:view(1,-1)} or {bboxes} end + if flip == false then + flip = {0} + elseif flip == true then + flip = {1} + end end local im_scales, im_sizes = self:processImages(self._feat[1],imgs,flip) diff --git a/RCNN.lua b/RCNN.lua index 4b6d4e9..ae42729 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -4,7 +4,7 @@ local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes local RCNN = torch.class('nnf.RCNN') function RCNN:__init(dataset) - --self.dataset = dataset + self.dataset = dataset self.image_transformer = nnf.ImageTransformer{ mean_pix={123.68/255,116.779/255,103.939/255}} @@ -12,6 +12,8 @@ function RCNN:__init(dataset) self.image_mean = nil self.padding = 16 self.use_square = false + + self.output_size = {3,self.crop_size,self.crop_size} end @@ -130,7 +132,8 @@ function RCNN:getFeature(im,bbox,flip) local flip = flip==nil and false or flip if type(im) == 'number' then - + assert(self.dataset, 'you must provide a dataset if using numeric indices') + im = self.dataset:getImage(im) end if 
type(bbox) == 'table' then bbox = torch.FloatTensor(bbox) @@ -146,7 +149,8 @@ function RCNN:getFeature(im,bbox,flip) end self._feat = self._feat or torch.FloatTensor() - self._feat:resize(num_boxes,3,self.crop_size,self.crop_size):zero() + + self._feat:resize(num_boxes,table.unpack(self.output_size)):zero() for i=1,num_boxes do self:getCrop(self._feat[i],im,bbox[i]) diff --git a/SPP.lua b/SPP.lua index a482464..1e5a056 100644 --- a/SPP.lua +++ b/SPP.lua @@ -4,11 +4,18 @@ local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes local SPP = torch.class('nnf.SPP') --TODO vectorize code ? -function SPP:__init(dataset,model) +function SPP:__init(model,dataset) self.dataset = dataset self.model = model - self.spp_pooler = inn.SpatialPyramidPooling({{1,1},{2,2},{3,3},{6,6}}):float() + + self.num_feat_chns = 256 + self.pooling_scales = {{1,1},{2,2},{3,3},{6,6}} + local pyr = torch.Tensor(pooling_scales):t() + local pooled_size = pyr[1]:dot(pyr[2]) + self.output_size = {num_chns*pooled_size} + + self.spp_pooler = inn.SpatialPyramidPooling(self.pooling_scales):float() self.image_transformer = nnf.ImageTransformer{} -- paper=864, their code=874 @@ -37,7 +44,12 @@ function SPP:getCrop(im_idx,bbox,flip) self.curr_im_feats = self:getConv5(im_idx,flip) self.curr_doflip = flip end - + + if type(bbox) == 'table' then + bbox = torch.FloatTensor(bbox) + end + bbox = bbox:dim() == 1 and bbox:view(1,-1) or bbox + if flip then flipBoundingBoxes(bbox,self.curr_im_feats.imSize[3]) end @@ -49,7 +61,7 @@ function SPP:getCrop(im_idx,bbox,flip) local feat = self.curr_im_feats local bestScale,bestbboxes,bboxes_norm,projected_bb = - self:projectBoxes(feat, bboxes, feat.scales) + self:projectBoxes(feat, bbox, feat.scales) local crop_feat = {} for i=1,bbox:size(1) do @@ -68,12 +80,13 @@ function SPP:getFeature(im_idx,bbox,flip) local crop_feat = self:getCrop(im_idx,bbox,flip) --local feat = self.spp_pooler:forward(crop_feat) - local feat = torch.FloatTensor(#crop_feat,feat_size) + self._feat = self._feat or torch.FloatTensor() + self._feat:resize(#crop_feat,table.unpack(self.output_size)) for i=1,#crop_feat do - feat[i] = self.spp_pooler:forward(crop_feat[i]) + self._feat[i]:copy(self.spp_pooler:forward(crop_feat[i])) end - return feat + return self._feat end -- SPP is meant to keep a cache of the conv5 features diff --git a/config.lua b/config.lua index 3456c39..806e76a 100644 --- a/config.lua +++ b/config.lua @@ -15,9 +15,10 @@ configs.algo = {} -------------------------------------------------------------------------------- local fp_params = { - crop_size = 227, - padding = 16, - use_square = false, + crop_size = 227, + padding = 16, + use_square = false, + image_transformer = image_transformer } local bp_params = { iter_per_batch = 100, @@ -27,7 +28,7 @@ local bp_params = { fg_threshold = 0.5, bg_threshold = {0.0,0.5}, do_flip = true, - batch_dim = {3,fp_params.crop_size,fp_params.crop_size}, +-- batch_dim = {3,fp_params.crop_size,fp_params.crop_size}, } local RCNN = { @@ -41,21 +42,25 @@ configs.algo.RCNN = RCNN -------------------------------------------------------------------------------- -- SPP -------------------------------------------------------------------------------- - -local fp_params = { - scales = {480,576,688,874,1200}, - randomscale = true, - sz_conv_standard = 13, - step_standard = 16, - offset0 = 21, - offset = 6.5, - inputArea = 224^2, -} +-- local num_chns = 256 local pooling_scales = {{1,1},{2,2},{3,3},{6,6}} local pyr = torch.Tensor(pooling_scales):t() local pooled_size = 
pyr[1]:dot(pyr[2]) local feat_dim = {num_chns*pooled_size} + +local fp_params = { + scales = {480,576,688,874,1200}, + randomscale = true, + sz_conv_standard = 13, + step_standard = 16, + offset0 = 21, + offset = 6.5, + inputArea = 224^2, + pooling_scales = pooling_scales, + num_feat_chns = num_chns, + image_transformer = image_transformer +} local bp_params = { iter_per_batch = 500, nTimesMoreData = 10, @@ -64,7 +69,7 @@ local bp_params = { fg_threshold = 0.5, bg_threshold = {0.1,0.5}, do_flip = true, - batch_dim = feat_dim, +-- batch_dim = feat_dim, } local SPP = { @@ -80,8 +85,9 @@ configs.algo.SPP = SPP -------------------------------------------------------------------------------- local fp_params = { - scale = {600} - max_size = 1000 + scale = {600}, + max_size = 1000, + image_transformer = image_transformer } local bp_params = { imgs_per_batch = 2, From 39becec4c3cdbff94ae6f7beb15520040dfe7b2f Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sat, 19 Sep 2015 16:05:43 +0200 Subject: [PATCH 24/79] Add train/test option to feature providers --- FRCNN.lua | 106 ++++++++++++++++++++++++++++++----------------- ImageDetect.lua | 91 ++++++---------------------------------- RCNN.lua | 39 +++++++---------- SPP.lua | 20 +++++++-- Tester_FRCNN.lua | 45 +++++++++----------- 5 files changed, 131 insertions(+), 170 deletions(-) diff --git a/FRCNN.lua b/FRCNN.lua index 277d1c6..1a331bc 100644 --- a/FRCNN.lua +++ b/FRCNN.lua @@ -6,26 +6,47 @@ function FRCNN:__init() self.image_transformer = nnf.ImageTransformer{} self.scale = {600} self.max_size = 1000 - self.randomscale = true + + self.train = true - --self.inputArea = 224^2 + self.inputArea = 224^2 +end + +function FRCNN:training() + self.train = true +end + +function FRCNN:evaluate() + self.train = false end function FRCNN:processImages(output_imgs,input_imgs,do_flip) - local num_images = #input_imgs + local num_images + local im + if self.train then + num_images = #input_imgs + else + num_images = #self.scale + im = self.image_transformer:preprocess(input_imgs[1]) + end local imgs = {} local im_sizes = {} local im_scales = {} for i=1,num_images do - local im = input_imgs[i] - im = self.image_transformer:preprocess(im) + local scale + if self.train then + im = input_imgs[i] + im = self.image_transformer:preprocess(im) + scale = self.scale[math.random(1,#self.scale)] + else + scale = self.scale[i] + end local flip = do_flip and (do_flip[i] == 1) or false if flip then im = image.hflip(im) end - local scale = self.scale[math.random(1,#self.scale)] local im_size = im[1]:size() local im_size_min = math.min(im_size[1],im_size[2]) local im_size_max = math.max(im_size[1],im_size[2]) @@ -48,44 +69,46 @@ function FRCNN:processImages(output_imgs,input_imgs,do_flip) return im_scales,im_sizes end --- only for single image ATM, not working yet -local function project_im_rois_eval(im_rois,scales) - local levels - local rois = torch.FloatTensor() - if #scales > 1 then +function FRCNN:projectImageROIs(rois,im_rois,scales,do_flip,imgs_size) + -- we consider two cases: + -- During training, the scales are sampled randomly per image, so + -- in the same image all the bboxes have the same scale, and we only + -- need to take into account the different images that are provided. 
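+  -- (e.g. two images rescaled by factors s1 and s2: a box {x1,y1,x2,y2} from
+  -- image i is emitted as {i, (x1-1)*si+1, (y1-1)*si+1, (x2-1)*si+1, (y2-1)*si+1},
+  -- the first column indexing the image inside the batch)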
+ -- During testing, we consider that there is only one image at a time, + -- and the scale for each bbox is the one which makes its area closest + -- to self.inputArea + if self.train or #scales == 1 then + local total_bboxes = 0 + local cumul_bboxes = {0} + for i=1,#scales do + total_bboxes = total_bboxes + im_rois[i]:size(1) + table.insert(cumul_bboxes,total_bboxes) + end + rois:resize(total_bboxes,5) + for i=1,#scales do + local idx = {cumul_bboxes[i]+1,cumul_bboxes[i+1]} + rois[{idx,1}]:fill(i) + rois[{idx,{2,5}}]:copy(im_rois[i]):add(-1):mul(scales[i]):add(1) + if do_flip and do_flip[i] == 1 then + flipBoundingBoxes(rois[{idx,{2,5}}],imgs_size[{i,2}]) + end + end + else -- not yet tested local scales = torch.FloatTensor(scales) local widths = im_rois[{{},3}] - im_rois[{{},1}] + 1 local heights = im_rois[{{},4}] - im_rois[{{}, 2}] + 1 local areas = widths * heights - local scaled_areas = areas:view(-1,1) * torch.pow(scales:view(1,-1),2) - local diff_areas = torch.abs(scaled_areas - 224 * 224) - levels = select(2, diff_areas:min(2)) - else - levels = torch.FloatTensor() - rois:resize(im_rois:size(1),5) - rois[{{},1}]:fill(1) - rois[{{},{2,5}}]:copy(im_rois):add(-1):mul(scales[1]):add(1) - end + local scaled_areas = areas:view(-1,1) * scales:view(1,-1):pow(2) + local diff_areas = scaled_areas:add(-1,self.inputArea):abs() -- no memory copy + local levels = select(2, diff_areas:min(2)) - return rois -end - - -local function project_im_rois(rois,im_rois,scales,do_flip,imgs_size) - local total_bboxes = 0 - local cumul_bboxes = {0} - for i=1,#scales do - total_bboxes = total_bboxes + im_rois[i]:size(1) - table.insert(cumul_bboxes,total_bboxes) - end - rois:resize(total_bboxes,5) - for i=1,#scales do - local idx = {cumul_bboxes[i]+1,cumul_bboxes[i+1]} - rois[{idx,1}]:fill(i) - rois[{idx,{2,5}}]:copy(im_rois[i]):add(-1):mul(scales[i]):add(1) - if do_flip and do_flip[i] == 1 then - flipBoundingBoxes(rois[{idx,{2,5}}],imgs_size[{i,2}]) + local num_boxes = im_rois:size(1) + rois:resize(num_boxes,5) + for i=1,num_boxes do + local s = levels[i] + rois[{i,{2,5}}]:copy(im_rois[i]):add(-1):mul(scales[s]):add(1) + rois[{i,1}] = s end end return rois @@ -110,8 +133,13 @@ function FRCNN:getFeature(imgs,bboxes,flip) end local im_scales, im_sizes = self:processImages(self._feat[1],imgs,flip) - project_im_rois(self._feat[2],bboxes,im_scales,flip,im_sizes) + self:projectImageROIs(self._feat[2],bboxes,im_scales,flip,im_sizes) return self._feat end +-- do the bbox regression +function FRCNN:postProcess(im,boxes,output) + -- not implemented yet + return output +end diff --git a/ImageDetect.lua b/ImageDetect.lua index 27bf8e2..841294c 100644 --- a/ImageDetect.lua +++ b/ImageDetect.lua @@ -1,88 +1,23 @@ local ImageDetect = torch.class('nnf.ImageDetect') +local recursiveResizeAsCopyTyped = utils.recursiveResizeAsCopyTyped -function ImageDetect:__init(model) +function ImageDetect:__init(model, feat_provider) self.model = model - self.image_transformer = nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, - raw_scale = 255, - swap = {3,2,1}} - self.scale = {600} - self.max_size = 1000 - self.sm = nn.SoftMax():cuda() -end - - -local function getImages(self,images,im) - local num_scales = #self.scale - - local imgs = {} - local im_sizes = {} - local im_scales = {} - - im = self.image_transformer:preprocess(im) - - local im_size = im[1]:size() - local im_size_min = math.min(im_size[1],im_size[2]) - local im_size_max = math.max(im_size[1],im_size[2]) - for i=1,num_scales do - local im_scale = 
self.scale[i]/im_size_min - if torch.round(im_scale*im_size_max) > self.max_size then - im_scale = self.max_size/im_size_max - end - local im_s = {im_size[1]*im_scale,im_size[2]*im_scale} - table.insert(imgs,image.scale(im,im_s[2],im_s[1])) - table.insert(im_sizes,im_s) - table.insert(im_scales,im_scale) - end - -- create single tensor with all images, padding with zero for different sizes - im_sizes = torch.IntTensor(im_sizes) - local max_shape = im_sizes:max(1)[1] - images:resize(num_scales,3,max_shape[1],max_shape[2]):zero() - for i=1,num_scales do - images[i][{{},{1,imgs[i]:size(2)},{1,imgs[i]:size(3)}}]:copy(imgs[i]) - end - return im_scales -end - -local function project_im_rois(im_rois,scales) - local levels - local rois = torch.FloatTensor() - if #scales > 1 then - local scales = torch.FloatTensor(scales) - local widths = im_rois[{{},3}] - im_rois[{{},1}] + 1 - local heights = im_rois[{{},4}] - im_rois[{{}, 2}] + 1 - - local areas = widths * heights - local scaled_areas = areas:view(-1,1) * torch.pow(scales:view(1,-1),2) - local diff_areas = torch.abs(scaled_areas - 224 * 224) - levels = select(2, diff_areas:min(2)) - else - levels = torch.FloatTensor() - rois:resize(im_rois:size(1),5) - rois[{{},1}]:fill(1) - rois[{{},{2,5}}]:copy(im_rois):add(-1):mul(scales[1]):add(1) - end - - return rois - + self.feat_provider = feat_provider + --self.sm = nn.SoftMax():cuda() end -- supposes boxes is in [x1,y1,x2,y2] format function ImageDetect:detect(im,boxes) - local inputs = {torch.FloatTensor(),torch.FloatTensor()} - local im_scales = getImages(self,inputs[1],im) - inputs[2] = project_im_rois(boxes,im_scales) + local ttype = self.model.output:type() + + local inputs = self.feat_provider:getFeature(im,boxes) + self.inputs,inputs = recursiveResizeAsCopyTyped(self.inputs,inputs,ttype) - local inputs_cuda = {torch.CudaTensor(),torch.CudaTensor()} - inputs_cuda[1]:resize(inputs[1]:size()):copy(inputs[1]) - inputs_cuda[2]:resize(inputs[2]:size()):copy(inputs[2]) - local output0 = self.model:forward(inputs_cuda) - local output = self.sm:forward(output0):float() - --[[ - for i=1,#im_scales do - local dd = boxes:clone() - dd:add(-1):mul(im_scale[i]):add(1) + local output0 = self.model:forward(self.inputs) + local output = self.feat_provider:postProcess(im,boxes,output0) + --self.sm:forward(output0) - end - --]] - return output + self.output,output = recursiveResizeAsCopyTyped(self.output,output,'torch.FloatTensor') + return self.output end diff --git a/RCNN.lua b/RCNN.lua index ae42729..8682e53 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -14,35 +14,21 @@ function RCNN:__init(dataset) self.use_square = false self.output_size = {3,self.crop_size,self.crop_size} - + self.train = true +end + +function RCNN:training() + self.train = true +end + +function RCNN:evaluate() + self.train = false end function RCNN:getCrop(output,I,bbox) -- suppose I is in BGR, as image_mean -- [x1 y1 x2 y2] order - --[[ - local flip = flip==nil and false or flip - - if self.curr_im_idx ~= im_idx or self.curr_doflip ~= flip then - self.curr_im_idx = im_idx - self.curr_im_feats = self.dataset:getImage(im_idx):float() - self.curr_im_feats = self.image_transformer:preprocess(self.curr_im_feats) - if flip then - self.curr_im_feats = image.hflip(self.curr_im_feats) - end - self.curr_doflip = flip - end - - local I = self.curr_im_feats - local bbox = bbox - - if flip then - local tt = bbox[1] - bbox[1] = I:size(3)-bbox[3]+1 - bbox[3] = I:size(3)-tt +1 - end - --]] - -- + local crop_size = self.crop_size local image_mean = self.image_mean 
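   -- below: the box is padded with `padding` pixels of context, cropped
   -- from I, warped to a crop_size x crop_size patch (bilinear), and
   -- mean-subtracted when image_mean is set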
local padding = self.padding @@ -158,3 +144,8 @@ function RCNN:getFeature(im,bbox,flip) return self._feat end + +-- don't do anything. could be the bbox regression or SVM, but I won't add it here +function RCNN:postProcess(im,bbox,output) + return output +end diff --git a/SPP.lua b/SPP.lua index 1e5a056..0b1b08b 100644 --- a/SPP.lua +++ b/SPP.lua @@ -20,7 +20,6 @@ function SPP:__init(model,dataset) -- paper=864, their code=874 self.scales = {480,576,688,874,1200} -- 874 - self.randomscale = true self.sz_conv_standard = 13 self.step_standard = 16 @@ -33,8 +32,16 @@ function SPP:__init(model,dataset) self.cachedir = nil + self.train = true end +function SPP:training() + self.train = true +end + +function SPP:evaluate() + self.train = false +end function SPP:getCrop(im_idx,bbox,flip) local flip = flip or false @@ -200,7 +207,8 @@ function SPP:getBestSPPScale(bbox,imSize,scales) local bestScale - if self.randomscale then + if self.train then + -- in training, select the scales randomly bestScale = torch.random(1,num_scales) else local inputArea = self.inputArea @@ -308,7 +316,8 @@ function SPP:projectBoxes(feat, bboxes, scales) -- get best SPP scale local bestScale = torch.FloatTensor(nboxes) - if self.randomscale then + if self.train then + -- in training, select the scales randomly bestScale:random(1,#scales) else local bboxArea = boxes.new():resize(nboxes):zero() @@ -380,7 +389,10 @@ function SPP:projectBoxes(feat, bboxes, scales) return bestScale,bestbboxes,bboxes_norm,projected_bb end - +-- don't do anything. could be the bbox regression or SVM, but I won't add it here +function SPP:postProcess(im,bbox,output) + return output +end function SPP:type(t_type) self._type = t_type diff --git a/Tester_FRCNN.lua b/Tester_FRCNN.lua index 3a541e0..fc4cdc8 100644 --- a/Tester_FRCNN.lua +++ b/Tester_FRCNN.lua @@ -64,24 +64,14 @@ function Tester:test(iteration) local module = self.module local feat_provider = self.feat_provider - local pathfolder = paths.concat(self.cachefolder,'test_iter'..iteration) - paths.mkdir(pathfolder) - module:evaluate() + feat_provider:evaluate() dataset:loadROIDB() - local feats = torch.FloatTensor() - local feats_batched = {} - local feats_cuda = torch.CudaTensor() - - local output = torch.FloatTensor() - - local output_dim = module:get(module:size()) - - local softmax = nn.SoftMax():float() - + local detec = nnf.ImageDetect(module, feat_provider) local boxes - -- + local im + local aboxes = {} for i=1,dataset.num_classes do table.insert(aboxes,{}) @@ -95,20 +85,20 @@ function Tester:test(iteration) local timer = torch.Timer() local timer2 = torch.Timer() local timer3 = torch.Timer() - local detec = nnf.ImageDetect(module) + for i=1,dataset:size() do timer:reset() io.write(('test: (%s) %5d/%-5d '):format(dataset.dataset_name,i,dataset:size())); boxes = dataset:getROIBoxes(i):float() - local im = dataset:getImage(i) + im = dataset:getImage(i) timer3:reset() local output = detec:detect(im,boxes) - local add_bg = 0 - if dataset.num_classes ~= output:size(2) then -- if there is no svm + local add_bg = 1--0 + --if dataset.num_classes ~= output:size(2) then -- if there is no svm --output = softmax:forward(output) - add_bg = 1 - end + -- add_bg = 1 + --end local tt = 0 local tt2 = timer3:time().real @@ -140,10 +130,11 @@ function Tester:test(iteration) end io.write((' prepare feat time: %.3f, forward time: %.3f, select time: %.3fs, total time: %.3fs\n'):format(tt,tt2,timer2:time().real,timer:time().real)); - --collectgarbage() - 
--mattorch.save(paths.concat(pathfolder,dataset.img_ids[i]..'.mat'),output:double()) end + local pathfolder = paths.concat(self.cachefolder,'test_iter'..iteration) + paths.mkdir(pathfolder) + for i = 1,dataset.num_classes do -- go back through and prune out detections below the found threshold for j = 1,dataset:size() do @@ -156,10 +147,14 @@ function Tester:test(iteration) end end end - save_file = paths.concat(pathfolder, dataset.classes[i].. '_boxes_'.. - dataset.dataset_name..self.suffix) - torch.save(save_file, aboxes) + --save_file = paths.concat(pathfolder, dataset.classes[i].. '_boxes_'.. + -- dataset.dataset_name..self.suffix) + --torch.save(save_file, aboxes) end + save_file = paths.concat(pathfolder, 'boxes_'.. + dataset.dataset_name..self.suffix) + torch.save(save_file, aboxes) + local res = {} for i=1,dataset.num_classes do From e6e5e184344c46fdb59c03d49efa2bd83313448b Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sat, 19 Sep 2015 17:29:20 +0200 Subject: [PATCH 25/79] Fix bug in ROIPooling --- ROIPooling.lua | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ROIPooling.lua b/ROIPooling.lua index af12402..3ca6d82 100644 --- a/ROIPooling.lua +++ b/ROIPooling.lua @@ -36,7 +36,7 @@ function ROIPooling:updateOutput(input) rois = self._rois end - if not self._type then self._type = output:type() end + if not self._type then self._type = self.output:type() end if #self.pooler < num_rois then local diff = num_rois - #self.pooler From 757f9016ba918ddf51ceb4ecff94b0794ec6a964 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sat, 19 Sep 2015 18:40:44 +0200 Subject: [PATCH 26/79] Continue refactoring. SPP is not working ATM --- BatchProvider.lua | 22 ++++++------- BatchProviderBase.lua | 13 +++++--- FRCNN.lua | 22 +++++++++---- ImageDetect.lua | 9 ++--- RCNN.lua | 18 ++++++++++ SPP.lua | 76 ++++++++++++++++++++++++++++++++++++++----- Tester_FRCNN.lua | 10 +++--- utils.lua | 2 +- 8 files changed, 131 insertions(+), 41 deletions(-) diff --git a/BatchProvider.lua b/BatchProvider.lua index f8a2c29..344e0b1 100644 --- a/BatchProvider.lua +++ b/BatchProvider.lua @@ -44,11 +44,6 @@ local initcheck = argcheck{ opt=true, help="background threshold, in the form {LO,HI}" }, - {name="createWindow", - type="function", - opt=true, - help="" - }, {name="do_flip", type="boolean", opt=true, @@ -63,6 +58,7 @@ function BatchProvider:__init(...) 
self.nTimesMoreData = 10 self.iter_per_batch = 500 + self.dataset = dataset self.feat_provider = nnf.RCNN(self.dataset) self.batch_dim = self.feat_provider.output_size--{256*50} self.target_dim = 1 @@ -154,12 +150,6 @@ function BatchProvider:selectBBoxes(fg_windows,bg_windows) return fg_w,bg_w end - --- specific for angle estimation -local function flip_angle(x) - return (-x)%360 -end - -- depends on the model function BatchProvider:prepareFeatures(im_idx,bboxes,fg_label,bg_label) @@ -229,6 +219,8 @@ function BatchProvider:prepareBatch(batches,targets) fg_label = torch.IntTensor() bg_label = torch.IntTensor() + local pass_index = torch.type(self.feat_provider) == 'nnf.SPP' and true or false + print('==> Preparing Batch Data') for i=1,opts.img_idx_end do xlua.progress(i,opts.img_idx_end) @@ -245,7 +237,13 @@ function BatchProvider:prepareBatch(batches,targets) bboxes[0] = bg_w[curr_idx] bboxes[1] = fg_w[curr_idx] - fg_data,bg_data = self:prepareFeatures(curr_idx,bboxes,fg_label,bg_label) + local data + if pass_index then + data = curr_idx + else + data = dataset:getImage(curr_idx) + end + fg_data,bg_data = self:prepareFeatures(data,bboxes,fg_label,bg_label) for j=1,nbg do bg_counter = bg_counter + 1 diff --git a/BatchProviderBase.lua b/BatchProviderBase.lua index 828a720..35e5ada 100644 --- a/BatchProviderBase.lua +++ b/BatchProviderBase.lua @@ -28,7 +28,7 @@ local function createWindowAngle(rec,i,j,is_bg) return window end - +--[[ local argcheck = require 'argcheck' local initcheck = argcheck{ pack=true, @@ -67,12 +67,13 @@ local initcheck = argcheck{ help="sample batches with random flips" }, } - +--]] local BatchProviderBase = torch.class('nnf.BatchProviderBase') function BatchProviderBase:__init(...) + self.dataset = nil self.batch_size = 128 self.fg_fraction = 0.25 self.fg_threshold = 0.5 @@ -80,11 +81,15 @@ function BatchProviderBase:__init(...) self.createWindow = createWindowBase self.do_flip = true - local opts = initcheck(...) - for k,v in pairs(opts) do self[k] = v end + --local opts = initcheck(...) 
+ --for k,v in pairs(opts) do self[k] = v end end +-- allow changing the way self.bboxes are formatted +function BatchProviderBase:setCreateWindow(createWindow) + self.createWindow = createWindow +end function BatchProviderBase:setupData() local dataset = self.dataset diff --git a/FRCNN.lua b/FRCNN.lua index 1a331bc..e893683 100644 --- a/FRCNN.lua +++ b/FRCNN.lua @@ -20,7 +20,8 @@ function FRCNN:evaluate() self.train = false end -function FRCNN:processImages(output_imgs,input_imgs,do_flip) +function FRCNN:processImages(input_imgs,do_flip) + local output_imgs = self._feat[1] local num_images local im if self.train then @@ -69,7 +70,8 @@ function FRCNN:processImages(output_imgs,input_imgs,do_flip) return im_scales,im_sizes end -function FRCNN:projectImageROIs(rois,im_rois,scales,do_flip,imgs_size) +function FRCNN:projectImageROIs(im_rois,scales,do_flip,imgs_size) + local rois = self._feat[2] -- we consider two cases: -- During training, the scales are sampled randomly per image, so -- in the same image all the bboxes have the same scale, and we only @@ -95,6 +97,7 @@ function FRCNN:projectImageROIs(rois,im_rois,scales,do_flip,imgs_size) end else -- not yet tested local scales = torch.FloatTensor(scales) + im_rois = im_rois[1] local widths = im_rois[{{},3}] - im_rois[{{},1}] + 1 local heights = im_rois[{{},4}] - im_rois[{{}, 2}] + 1 @@ -115,16 +118,16 @@ function FRCNN:projectImageROIs(rois,im_rois,scales,do_flip,imgs_size) end function FRCNN:getFeature(imgs,bboxes,flip) - --local flip = flip==nil and false or flip - self._feat = self._feat or {torch.FloatTensor(),torch.FloatTensor()} + -- if it's in test mode, adapt inputs if torch.isTensor(imgs) then imgs = {imgs} if type(bboxes) == 'table' then bboxes = torch.FloatTensor(bboxes) - bboxes = bboxes:dim() == 1 and {bboxes:view(1,-1)} or {bboxes} + bboxes = bboxes:dim() == 1 and bboxes:view(1,-1) or bboxes end + bboxes = {bboxes} if flip == false then flip = {0} elseif flip == true then @@ -132,8 +135,8 @@ function FRCNN:getFeature(imgs,bboxes,flip) end end - local im_scales, im_sizes = self:processImages(self._feat[1],imgs,flip) - self:projectImageROIs(self._feat[2],bboxes,im_scales,flip,im_sizes) + local im_scales, im_sizes = self:processImages(imgs,flip) + self:projectImageROIs(bboxes,im_scales,flip,im_sizes) return self._feat end @@ -143,3 +146,8 @@ function FRCNN:postProcess(im,boxes,output) -- not implemented yet return output end + +function FRCNN:compute(model, inputs) + return model:forward(inputs) +end + diff --git a/ImageDetect.lua b/ImageDetect.lua index 841294c..e75d018 100644 --- a/ImageDetect.lua +++ b/ImageDetect.lua @@ -1,5 +1,5 @@ local ImageDetect = torch.class('nnf.ImageDetect') -local recursiveResizeAsCopyTyped = utils.recursiveResizeAsCopyTyped +local recursiveResizeAsCopyTyped = paths.dofile('utils.lua').recursiveResizeAsCopyTyped function ImageDetect:__init(model, feat_provider) self.model = model @@ -9,13 +9,14 @@ end -- supposes boxes is in [x1,y1,x2,y2] format function ImageDetect:detect(im,boxes) + local feat_provider = self.feat_provider local ttype = self.model.output:type() - local inputs = self.feat_provider:getFeature(im,boxes) + local inputs = feat_provider:getFeature(im,boxes) self.inputs,inputs = recursiveResizeAsCopyTyped(self.inputs,inputs,ttype) - local output0 = self.model:forward(self.inputs) - local output = self.feat_provider:postProcess(im,boxes,output0) + local output0 = feat_provider:compute(self.model, self.inputs) + local output = feat_provider:postProcess(im,boxes,output0) 
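+  -- `compute` lets each feature provider own its forward pass: RCNN splits
+  -- the crops into chunks of max_batch_size, FRCNN forwards {imgs,rois} in
+  -- one shot; `postProcess` is a hook for things like bbox regression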
--self.sm:forward(output0) self.output,output = recursiveResizeAsCopyTyped(self.output,output,'torch.FloatTensor') diff --git a/RCNN.lua b/RCNN.lua index 8682e53..29a5f1f 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -15,6 +15,7 @@ function RCNN:__init(dataset) self.output_size = {3,self.crop_size,self.crop_size} self.train = true + self.max_batch_size = 128 end function RCNN:training() @@ -149,3 +150,20 @@ end function RCNN:postProcess(im,bbox,output) return output end + +function RCNN:compute(model,inputs) + local inputs_s = inputs:split(self.max_batch_size,1) + + self.output = self.output or inputs.new() + + for idx, f in ipairs(inputs_s) do + local output0 = model:forward(f) + local fs = f:size(1) + if idx == 1 then + local ss = output0[1]:size():totable() + self.output:resize(inputs:size(1),table.unpack(ss)) + end + self.output:narrow(1,(idx-1)*self.max_batch_size+1,fs):copy(output0) + end + return self.output +end diff --git a/SPP.lua b/SPP.lua index 0b1b08b..f98210b 100644 --- a/SPP.lua +++ b/SPP.lua @@ -11,11 +11,11 @@ function SPP:__init(model,dataset) self.num_feat_chns = 256 self.pooling_scales = {{1,1},{2,2},{3,3},{6,6}} - local pyr = torch.Tensor(pooling_scales):t() + local pyr = torch.Tensor(self.pooling_scales):t() local pooled_size = pyr[1]:dot(pyr[2]) - self.output_size = {num_chns*pooled_size} + self.output_size = {self.num_feat_chns*pooled_size} - self.spp_pooler = inn.SpatialPyramidPooling(self.pooling_scales):float() + --self.spp_pooler = inn.SpatialPyramidPooling(self.pooling_scales):float() self.image_transformer = nnf.ImageTransformer{} -- paper=864, their code=874 @@ -43,6 +43,28 @@ function SPP:evaluate() self.train = false end +-- here just to check +function SPP:getCrop_old(im_idx,bbox,flip) + local flip = flip or false + + if self.curr_im_idx ~= im_idx or self.curr_doflip ~= flip then + self.curr_im_idx = im_idx + self.curr_im_feats = self:getConv5(im_idx,flip) + self.curr_doflip = flip + end + + if flip then + flipBoundingBoxes(bbox,self.curr_im_feats.imSize[3]) + end + + local bestScale,bestBbox = self:getBestSPPScale(bbox,self.curr_im_feats.imSize,self.curr_im_feats.scales) + local box_norm = self:getResposeBoxes(bestBbox) + + local crop_feat = self:getCroppedFeat(self.curr_im_feats.rsp[bestScale],box_norm) + + return crop_feat,box_norm +end + function SPP:getCrop(im_idx,bbox,flip) local flip = flip or false @@ -73,6 +95,8 @@ function SPP:getCrop(im_idx,bbox,flip) local crop_feat = {} for i=1,bbox:size(1) do local bbox_ = projected_bb[i] +-- print(bbox_) +-- print(i) local patch = feat.rsp[bestScale[i]][{{},{bbox_[2],bbox_[4]},{bbox_[1],bbox_[3]}}] table.insert(crop_feat,patch) end @@ -81,6 +105,17 @@ function SPP:getCrop(im_idx,bbox,flip) return crop_feat end +-- here just to check +function SPP:getFeature_old(im_idx,bbox,flip) + local flip = flip or false + + local crop_feat = self:getCrop_old(im_idx,bbox,flip) + + local feat = self.spp_pooler:forward(crop_feat) + return feat +end + + function SPP:getFeature(im_idx,bbox,flip) local flip = flip or false @@ -113,11 +148,15 @@ function SPP:getConv5(im_idx,flip) cachedir = '' end + local im_name if not self.dataset then self.use_cache = false + im_name = '' + else + im_name = self.dataset.img_ids[im_idx] end - local cachefile = paths.concat(self.cachedir,self.dataset.img_ids[im_idx]) + local cachefile = paths.concat(cachedir,im_name) if flip then cachefile = cachefile..'_flip' @@ -324,7 +363,7 @@ function SPP:projectBoxes(feat, bboxes, scales) bboxArea:map2(bboxes[{{},3}],bboxes[{{},1}],function(xx,xx2,xx1) return 
xx2-xx1+1 end) bboxArea:map2(bboxes[{{},4}],bboxes[{{},2}],function(xx,xx2,xx1) return xx*(xx2-xx1+1) end) - local expected_scale = bboxArea:pow(-0.5):mul(sz_conv_standard*step_standard*min_dim) + local expected_scale = bboxArea:float():pow(-0.5):mul(sz_conv_standard*step_standard*min_dim) expected_scale:round() local nbboxDiffArea = torch.FloatTensor(#scales,nboxes) @@ -353,10 +392,13 @@ function SPP:projectBoxes(feat, bboxes, scales) local bboxes_norm = bestbboxes:clone() bboxes_norm[{{},{1,2}}]:add(-offset0 + offset):div(step_standard):add( 0.5) - bboxes_norm[{{},{1,2}}]:floor():add(1) + --bboxes_norm[{{},{1,2}}]:floor():add(1) bboxes_norm[{{},{3,4}}]:add(-offset0 - offset):div(step_standard):add(-0.5) - bboxes_norm[{{},{3,4}}]:ceil():add(1) - + --bboxes_norm[{{},{3,4}}]:ceil():add(1) +print(bestbboxes) +--print(bestbboxes[881]) +print(bboxes_norm) +--print(bboxes_norm[881]) local x0gtx1 = bboxes_norm[{{},1}]:gt(bboxes_norm[{{},3}]) local y0gty1 = bboxes_norm[{{},2}]:gt(bboxes_norm[{{},4}]) @@ -386,6 +428,7 @@ function SPP:projectBoxes(feat, bboxes, scales) end end + projected_bb:floor() return bestScale,bestbboxes,bboxes_norm,projected_bb end @@ -394,6 +437,23 @@ function SPP:postProcess(im,bbox,output) return output end +function SPP:compute(model,inputs) + local inputs_s = inputs:split(self.max_batch_size,1) + + self.output = self.output or inputs.new() + + for idx, f in ipairs(inputs_s) do + local output0 = model:forward(f) + local fs = f:size(1) + if idx == 1 then + local ss = output0[1]:size():totable() + self.output:resize(inputs:size(1),table.unpack(ss)) + end + self.output:narrow(1,(idx-1)*self.max_batch_size+1,fs):copy(output0) + end + return self.output +end + function SPP:type(t_type) self._type = t_type --self.spp_pooler = self.spp_pooler:type(t_type) diff --git a/Tester_FRCNN.lua b/Tester_FRCNN.lua index fc4cdc8..fef9519 100644 --- a/Tester_FRCNN.lua +++ b/Tester_FRCNN.lua @@ -94,17 +94,16 @@ function Tester:test(iteration) timer3:reset() local output = detec:detect(im,boxes) - local add_bg = 1--0 - --if dataset.num_classes ~= output:size(2) then -- if there is no svm - --output = softmax:forward(output) - -- add_bg = 1 - --end + local add_bg = 1 local tt = 0 local tt2 = timer3:time().real timer2:reset() + -- do a NMS for each class, based on the scores from the classifier for j=1,dataset.num_classes do local scores = output:select(2,j+add_bg) + -- only select detections with a score greater than thresh + -- this avoid doing NMS on too many bboxes with low score local idx = torch.range(1,scores:numel()):long() local idx2 = scores:gt(thresh[j]) idx = idx[idx2] @@ -123,6 +122,7 @@ function Tester:test(iteration) aboxes[j][i] = torch.FloatTensor() end + -- remove low scoring boxes and update threshold if i%1000 == 0 then aboxes[j],thresh[j] = keep_top_k(aboxes[j],max_per_set) end diff --git a/utils.lua b/utils.lua index c158e86..aa7e54a 100644 --- a/utils.lua +++ b/utils.lua @@ -297,7 +297,7 @@ utils.VOCap = VOCap utils.convertCaffeModelToTorch = convertCaffeModelToTorch utils.reshapeLastLinearLayer = reshapeLastLinearLayer utils.sanitize = sanitize -utils.recursiveResizeAsCopyTyped = recursiveResizeAsCopyType +utils.recursiveResizeAsCopyTyped = recursiveResizeAsCopyTyped utils.flipBoundingBoxes = flipBoundingBoxes return utils From b3b12d118f8ef6e1099499c4378e36d4c1d2747d Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sat, 19 Sep 2015 23:27:18 +0200 Subject: [PATCH 27/79] Fixed SPP. 
Gives the same result as before --- SPP.lua | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/SPP.lua b/SPP.lua index f98210b..9c46c0c 100644 --- a/SPP.lua +++ b/SPP.lua @@ -62,7 +62,7 @@ function SPP:getCrop_old(im_idx,bbox,flip) local crop_feat = self:getCroppedFeat(self.curr_im_feats.rsp[bestScale],box_norm) - return crop_feat,box_norm + return crop_feat,box_norm,bestBbox end function SPP:getCrop(im_idx,bbox,flip) @@ -392,21 +392,18 @@ function SPP:projectBoxes(feat, bboxes, scales) local bboxes_norm = bestbboxes:clone() bboxes_norm[{{},{1,2}}]:add(-offset0 + offset):div(step_standard):add( 0.5) - --bboxes_norm[{{},{1,2}}]:floor():add(1) + bboxes_norm[{{},{1,2}}]:floor():add(1) bboxes_norm[{{},{3,4}}]:add(-offset0 - offset):div(step_standard):add(-0.5) - --bboxes_norm[{{},{3,4}}]:ceil():add(1) -print(bestbboxes) ---print(bestbboxes[881]) -print(bboxes_norm) ---print(bboxes_norm[881]) + bboxes_norm[{{},{3,4}}]:ceil():add(1) + local x0gtx1 = bboxes_norm[{{},1}]:gt(bboxes_norm[{{},3}]) local y0gty1 = bboxes_norm[{{},2}]:gt(bboxes_norm[{{},4}]) - bboxes_norm[{{},1}][x0gtx1]:add(bboxes_norm[{{},3}][x0gtx1]):div(2) - bboxes_norm[{{},3}][x0gtx1]:copy(bboxes_norm[{{},1}][x0gtx1]) + bboxes_norm[{{},1}][x0gtx1] = bboxes_norm[{{},1}][x0gtx1]:add(bboxes_norm[{{},3}][x0gtx1]):div(2) + bboxes_norm[{{},3}][x0gtx1] = (bboxes_norm[{{},1}][x0gtx1]) - bboxes_norm[{{},2}][y0gty1]:add(bboxes_norm[{{},4}][y0gty1]):div(2) - bboxes_norm[{{},4}][y0gty1]:copy(bboxes_norm[{{},2}][y0gty1]) + bboxes_norm[{{},2}][y0gty1] = bboxes_norm[{{},2}][y0gty1]:add(bboxes_norm[{{},4}][y0gty1]):div(2) + bboxes_norm[{{},4}][y0gty1] = (bboxes_norm[{{},2}][y0gty1]) -- remove repeated projections if self.dedup then @@ -428,7 +425,7 @@ print(bboxes_norm) end end - projected_bb:floor() + --projected_bb:floor() return bestScale,bestbboxes,bboxes_norm,projected_bb end From bb053cbd241a4d7d855b2cb72d03e38f1a5ba831 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 20 Sep 2015 12:30:20 +0200 Subject: [PATCH 28/79] Batch providers are working again. Cleaned a bit the Tester. Need to add argcheck to functions (but first need to figure out how to pass several parameters without it crashing, maybe using ordered arguments) --- BatchProvider.lua | 12 ++---------- BatchProviderROI.lua | 7 ++++--- RCNN.lua | 3 ++- SPP.lua | 11 +---------- Tester_FRCNN.lua | 24 ++++++++++++++---------- 5 files changed, 23 insertions(+), 34 deletions(-) diff --git a/BatchProvider.lua b/BatchProvider.lua index 344e0b1..f768480 100644 --- a/BatchProvider.lua +++ b/BatchProvider.lua @@ -52,8 +52,8 @@ local initcheck = argcheck{ } --]] -- -function BatchProvider:__init(...) - parent:__init(...) 
+function BatchProvider:__init(dataset) + parent:__init() self.nTimesMoreData = 10 self.iter_per_batch = 500 @@ -156,9 +156,6 @@ function BatchProvider:prepareFeatures(im_idx,bboxes,fg_label,bg_label) local num_pos = bboxes[1] and #bboxes[1] or 0 local num_neg = bboxes[0] and #bboxes[0] or 0 - --fg_data:resize(num_pos,unpack(self.batch_dim)) - --bg_data:resize(num_neg,unpack(self.batch_dim)) - fg_label:resize(num_pos,self.target_dim) bg_label:resize(num_neg,self.target_dim) @@ -171,14 +168,12 @@ function BatchProvider:prepareFeatures(im_idx,bboxes,fg_label,bg_label) for i=1,num_pos do local bbox = {bboxes[1][i][2],bboxes[1][i][3],bboxes[1][i][4],bboxes[1][i][5]} table.insert(s_boxes,bbox) - --fg_data[i] = self.feat_provider:getFeature(im_idx,bbox,flip) fg_label[i][1] = bboxes[1][i][6] end for i=1,num_neg do local bbox = {bboxes[0][i][2],bboxes[0][i][3],bboxes[0][i][4],bboxes[0][i][5]} table.insert(s_boxes,bbox) - --bg_data[i] = self.feat_provider:getFeature(im_idx,bbox,flip) bg_label[i][1] = bboxes[0][i][6] end @@ -188,7 +183,6 @@ function BatchProvider:prepareFeatures(im_idx,bboxes,fg_label,bg_label) local bg_data = feats:narrow(1,num_pos+1,num_neg) return fg_data, bg_data --- return fg_data,bg_data,fg_label,bg_label end function BatchProvider:prepareBatch(batches,targets) @@ -214,8 +208,6 @@ function BatchProvider:prepareBatch(batches,targets) local bg_counter = 0 local fg_data,bg_data,fg_label,bg_label - --fg_data = torch.FloatTensor() - --bg_data = torch.FloatTensor() fg_label = torch.IntTensor() bg_label = torch.IntTensor() diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index a54afba..dc4fcef 100644 --- a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -44,9 +44,10 @@ local initcheck = argcheck{ } --]] function BatchProviderROI:__init(dataset) - parent:__init{dataset=dataset} + parent:__init() + self.dataset = dataset self.imgs_per_batch = 2 - self.feature_provider = nnf.FRCNN{} + self.feat_provider = nnf.FRCNN{} end -- setup is the same @@ -141,7 +142,7 @@ function BatchProviderROI:getBatch() table.insert(imgs,dataset:getImage(opts.img_idx[i])) end local boxes,labels = self:selectBBoxes(fg_windows,bg_windows) - self.batches = self.feature_provider:getFeature(imgs,boxes,opts.do_flip) + self.batches = self.feat_provider:getFeature(imgs,boxes,opts.do_flip) targets:resize(labels:size()):copy(labels) diff --git a/RCNN.lua b/RCNN.lua index 29a5f1f..0423b52 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -91,7 +91,7 @@ function RCNN:getCrop(output,I,bbox) ------ --local patch = image.crop(I,bbox[1],bbox[2],bbox[3],bbox[4]); - local patch = image.crop(I,bbox[1],bbox[2],bbox[3],bbox[4]):float(); + local patch = I[{{},{bbox[2],bbox[4]},{bbox[1],bbox[3]}}] local tmp = image.scale(patch,crop_width,crop_height,'bilinear'); if image_mean then @@ -139,6 +139,7 @@ function RCNN:getFeature(im,bbox,flip) self._feat:resize(num_boxes,table.unpack(self.output_size)):zero() + -- use threads to make it faster for i=1,num_boxes do self:getCrop(self._feat[i],im,bbox[i]) end diff --git a/SPP.lua b/SPP.lua index 9c46c0c..6bd8663 100644 --- a/SPP.lua +++ b/SPP.lua @@ -62,7 +62,7 @@ function SPP:getCrop_old(im_idx,bbox,flip) local crop_feat = self:getCroppedFeat(self.curr_im_feats.rsp[bestScale],box_norm) - return crop_feat,box_norm,bestBbox + return crop_feat end function SPP:getCrop(im_idx,bbox,flip) @@ -83,11 +83,6 @@ function SPP:getCrop(im_idx,bbox,flip) flipBoundingBoxes(bbox,self.curr_im_feats.imSize[3]) end - --local bestScale,bestBbox = 
self:getBestSPPScale(bbox,self.curr_im_feats.imSize,self.curr_im_feats.scales) - --local box_norm = self:getResposeBoxes(bestBbox) - - --local crop_feat = self:getCroppedFeat(self.curr_im_feats.rsp[bestScale],box_norm) - local feat = self.curr_im_feats local bestScale,bestbboxes,bboxes_norm,projected_bb = self:projectBoxes(feat, bbox, feat.scales) @@ -95,12 +90,9 @@ function SPP:getCrop(im_idx,bbox,flip) local crop_feat = {} for i=1,bbox:size(1) do local bbox_ = projected_bb[i] --- print(bbox_) --- print(i) local patch = feat.rsp[bestScale[i]][{{},{bbox_[2],bbox_[4]},{bbox_[1],bbox_[3]}}] table.insert(crop_feat,patch) end - return crop_feat end @@ -121,7 +113,6 @@ function SPP:getFeature(im_idx,bbox,flip) local crop_feat = self:getCrop(im_idx,bbox,flip) - --local feat = self.spp_pooler:forward(crop_feat) self._feat = self._feat or torch.FloatTensor() self._feat:resize(#crop_feat,table.unpack(self.output_size)) for i=1,#crop_feat do diff --git a/Tester_FRCNN.lua b/Tester_FRCNN.lua index fef9519..34f0086 100644 --- a/Tester_FRCNN.lua +++ b/Tester_FRCNN.lua @@ -6,14 +6,11 @@ local VOCevaldet = utils.VOCevaldet local Tester = torch.class('nnf.Tester_FRCNN') -function Tester:__init(module,feat_provider) - self.dataset = feat_provider.dataset - self.module = module +function Tester:__init(module,feat_provider,dataset) + self.dataset = dataset self.feat_provider = feat_provider + self.module = module - self.feat_dim = {256*50} - self.max_batch_size = 4000 - self.cachefolder = nil self.cachename = nil self.suffix = '' @@ -162,6 +159,15 @@ function Tester:test(iteration) res[i] = VOCevaldet(dataset,aboxes[i],cls) end res = torch.Tensor(res) + + print_scores(dataset,res) + + -- clean roidb to free memory + dataset.roidb = nil + return res +end + +local function print_scores(dataset,res) print('Results:') -- print class names io.write('|') @@ -181,8 +187,6 @@ function Tester:test(iteration) end io.write('\n') io.write(('mAP: %.4f\n'):format(res:mean(1)[1])) - - -- clean roidb to free memory - dataset.roidb = nil - return res end + + From ab435d3aa57e97afe8513e73bde070eeae17408d Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 20 Sep 2015 20:54:07 +0200 Subject: [PATCH 29/79] Adding argcheck for most of the classes --- BatchProvider.lua | 60 ++++++++++++++++--------------------- BatchProviderROI.lua | 35 +++++++++++----------- FRCNN.lua | 33 +++++++++++++++----- RCNN.lua | 44 ++++++++++++++++++++++----- SPP.lua | 71 +++++++++++++++++++++++++++++++++++++++++--- Trainer.lua | 6 ++-- 6 files changed, 174 insertions(+), 75 deletions(-) diff --git a/BatchProvider.lua b/BatchProvider.lua index f768480..73aec33 100644 --- a/BatchProvider.lua +++ b/BatchProvider.lua @@ -1,6 +1,6 @@ local BatchProvider,parent = torch.class('nnf.BatchProvider','nnf.BatchProviderBase') ---[[ + local argcheck = require 'argcheck' local initcheck = argcheck{ pack=true, @@ -9,65 +9,55 @@ local initcheck = argcheck{ type="nnf.DataSetPascal", help="A dataset class" }, - {name="nTimesMoreData", + {name="feat_provider", + type="nnf.RCNN", + help="A feat provider class" + }, + {name="batch_size", type="number", - opt=true, - help=""}, + default=128, + help="batch size"}, {name="iter_per_batch", type="number", - opt=true, + default=10, help=""}, - {name="batch_dim", - type="table", - opt=true, - help=""}, - {name="target_dim", + {name="nTimesMoreData", type="number", - opt=true, + default=10, help=""}, - {name="batch_size", - type="number", - opt=true, - help="batch size"}, {name="fg_fraction", type="number", - opt=true, + 
default=0.25, help="foreground fraction in batch" }, {name="fg_threshold", type="number", - opt=true, + default=0.5, help="foreground threshold" }, {name="bg_threshold", type="table", - opt=true, + default={0.1,0.5}, help="background threshold, in the form {LO,HI}" }, + {name="target_dim", + type="number", + default=1, + help=""}, {name="do_flip", type="boolean", - opt=true, + default=true, help="sample batches with random flips" }, } ---]] --- -function BatchProvider:__init(dataset) + +function BatchProvider:__init(...) parent:__init() - self.nTimesMoreData = 10 - self.iter_per_batch = 500 - - self.dataset = dataset - self.feat_provider = nnf.RCNN(self.dataset) - self.batch_dim = self.feat_provider.output_size--{256*50} - self.target_dim = 1 - - --local opts = initcheck(...) - --for k,v in pairs(opts) do self[k] = v end - - --self.dataset = feat_provider.dataset - --self.feat_provider = feat_provider + local opts = initcheck(...) + for k,v in pairs(opts) do self[k] = v end + + self.batch_dim = self.feat_provider.output_size end diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index dc4fcef..7c5850b 100644 --- a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -1,5 +1,5 @@ local BatchProviderROI, parent = torch.class('nnf.BatchProviderROI','nnf.BatchProviderBase') ---[[ + local argcheck = require 'argcheck' local initcheck = argcheck{ pack=true, @@ -8,46 +8,45 @@ local initcheck = argcheck{ type="nnf.DataSetPascal", help="A dataset class" }, + {name="feat_provider", + type="nnf.FRCNN", + help="A feat provider class" + }, {name="batch_size", type="number", opt=true, help="batch size"}, - {name="batch_size", + {name="imgs_per_batch", type="number", - opt=true, - help="batch size"}, + default=2, + help="number of images to sample in a batch"}, {name="fg_fraction", type="number", - opt=true, + default=0.25, help="foreground fraction in batch" }, {name="fg_threshold", type="number", - opt=true, + default=0.5, help="foreground threshold" }, {name="bg_threshold", type="table", - opt=true, + default={0.1,0.5}, help="background threshold, in the form {LO,HI}" }, - {name="createWindow", - type="function", - opt=true, - help="" - }, {name="do_flip", type="boolean", - opt=true, + default=true, help="sample batches with random flips" }, } ---]] -function BatchProviderROI:__init(dataset) + +function BatchProviderROI:__init(...) parent:__init() - self.dataset = dataset - self.imgs_per_batch = 2 - self.feat_provider = nnf.FRCNN{} + + local opts = initcheck(...) + for k,v in pairs(opts) do self[k] = v end end -- setup is the same diff --git a/FRCNN.lua b/FRCNN.lua index e893683..07900ef 100644 --- a/FRCNN.lua +++ b/FRCNN.lua @@ -1,15 +1,34 @@ local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes local FRCNN = torch.class('nnf.FRCNN') -function FRCNN:__init() - - self.image_transformer = nnf.ImageTransformer{} - self.scale = {600} - self.max_size = 1000 +local argcheck = require 'argcheck' +local initcheck = argcheck{ + pack=true, + {name="scale", + type="table", + default={600}, + help="image scales"}, + {name="max_size", + type="number", + default=1000, + help="maximum dimension of an image"}, + {name="inputArea", + type="number", + default=224^2, + help="force square crops"}, + {name="image_transformer", + type="nnf.ImageTransformer", + default=nnf.ImageTransformer{}, + help="Class to preprocess input images"}, +} + + +function FRCNN:__init(...) + + local opts = initcheck(...) 
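A note on the argcheck pattern introduced in this patch: with pack=true the generated checker returns a single table of validated options, with defaults filled in, which each constructor then copies onto self. A minimal sketch of the mechanism outside of any class (the rules mirror RCNN's crop_size/padding further down in this patch; the call values are illustrative):

    local argcheck = require 'argcheck'
    local initcheck = argcheck{
       pack=true,
       {name="crop_size", type="number", default=227, help="crop size"},
       {name="padding",   type="number", default=16,  help="context padding"},
    }
    local opts = initcheck{padding=8}     -- named-style call
    print(opts.crop_size, opts.padding)   -- 227  8 (crop_size fell back to its default)
    -- a constructor then simply does: for k,v in pairs(opts) do self[k] = v end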
+ for k,v in pairs(opts) do self[k] = v end self.train = true - - self.inputArea = 224^2 end function FRCNN:training() diff --git a/RCNN.lua b/RCNN.lua index 0423b52..bf1331d 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -1,21 +1,49 @@ local argcheck = require 'argcheck' local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes +local argcheck = require 'argcheck' +local initcheck = argcheck{ + pack=true, + {name="crop_size", + type="number", + default=227, + help="crop size"}, + {name="padding", + type="number", + default=16, + help="context padding"}, + {name="use_square", + type="boolean", + default=false, + help="force square crops"}, + {name="image_transformer", + type="nnf.ImageTransformer", + default=nnf.ImageTransformer{}, + help="Class to preprocess input images"}, + {name="max_batch_size", + type="number", + default=128, + help="maximum size of batches during evaluation"}, + {name="dataset", + type="nnf.DataSetPascal", -- change to allow other datasets + opt=true, + help="A dataset class"}, +} + + local RCNN = torch.class('nnf.RCNN') -function RCNN:__init(dataset) - self.dataset = dataset - self.image_transformer = nnf.ImageTransformer{ - mean_pix={123.68/255,116.779/255,103.939/255}} +function RCNN:__init(...) +-- self.image_transformer = nnf.ImageTransformer{ +-- mean_pix={123.68/255,116.779/255,103.939/255}} - self.crop_size = 227 self.image_mean = nil - self.padding = 16 - self.use_square = false + + local opts = initcheck(...) + for k,v in pairs(opts) do self[k] = v end self.output_size = {3,self.crop_size,self.crop_size} self.train = true - self.max_batch_size = 128 end function RCNN:training() diff --git a/SPP.lua b/SPP.lua index 6bd8663..4af9636 100644 --- a/SPP.lua +++ b/SPP.lua @@ -3,12 +3,75 @@ local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes local SPP = torch.class('nnf.SPP') ---TODO vectorize code ? -function SPP:__init(model,dataset) +-- argcheck crashes with that many arguments, and using unordered +-- doesn't seems practical +--[[ +local argcheck = require 'argcheck' +local initcheck = argcheck{ + pack=true, + {name="model", + type="nn.Sequential", + help="conv5 model"}, + {name="dataset", + type="nnf.DataSetPascal", -- change to allow other datasets + opt=true, + help="A dataset class"}, + {name="pooling_scales", + type="table", + default={{1,1},{2,2},{3,3},{6,6}}, + help="pooling scales"}, + {name="num_feat_chns", + type="number", + default=256, + help="number of feature channels to be pooled"}, + {name="scales", + type="table", + default={480,576,688,874,1200}, + help="image scales"}, + {name="sz_conv_standard", + type="number", + default=13, + help=""}, + {name="step_standard", + type="number", + default=16, + help=""}, + {name="offset0", + type="number", + default=21, + help=""}, + {name="offset", + type="number", + default=6.5, + help=""}, + {name="inputArea", + type="number", + default=224^2, + help="force square crops"}, + {name="image_transformer", + type="nnf.ImageTransformer", + default=nnf.ImageTransformer{}, + help="Class to preprocess input images"}, + {name="use_cache", + type="boolean", + default=true, + help=""}, + {name="cachedir", + type="string", + opt=true, + help=""}, +} +--]] + + +function SPP:__init(...) self.dataset = dataset self.model = model + --local opts = initcheck(...) 
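For reference, the SPP constructor below derives its flattened output size from the pyramid levels: each {w,h} level contributes w*h pooled cells per feature channel, so the default four levels over 256 channels give the 256*50 = 12800 figure used elsewhere in the codebase. A quick check of that arithmetic:

    require 'torch'
    local pooling_scales = {{1,1},{2,2},{3,3},{6,6}}
    local pyr = torch.Tensor(pooling_scales):t()
    local pooled_size = pyr[1]:dot(pyr[2])   -- 1*1 + 2*2 + 3*3 + 6*6 = 50
    print(256 * pooled_size)                 -- 12800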
+ --for k,v in pairs(opts) do self[k] = v end + self.num_feat_chns = 256 self.pooling_scales = {{1,1},{2,2},{3,3},{6,6}} local pyr = torch.Tensor(self.pooling_scales):t() @@ -17,7 +80,7 @@ function SPP:__init(model,dataset) --self.spp_pooler = inn.SpatialPyramidPooling(self.pooling_scales):float() self.image_transformer = nnf.ImageTransformer{} - +-- [[ -- paper=864, their code=874 self.scales = {480,576,688,874,1200} -- 874 @@ -31,7 +94,7 @@ function SPP:__init(model,dataset) self.use_cache = true self.cachedir = nil - + --]] self.train = true end diff --git a/Trainer.lua b/Trainer.lua index 87f6bfe..cca5478 100644 --- a/Trainer.lua +++ b/Trainer.lua @@ -15,7 +15,7 @@ function Trainer:__init(module,criterion,batch_provider) self.parameters,self.gradParameters = self.module:getParameters() self.optimState = {learningRate = 1e-3, weightDecay = 0.0005, momentum = 0.9, - learningRateDecay = 0} + learningRateDecay = 0, dampening = 0} self.epoch = 0 @@ -51,7 +51,7 @@ function Trainer:train() xlua.progress(t,maxIter) -- get training batch - self.input0,self.target0 = batch_provider(self.input0,self.target0) + self.input0,self.target0 = batch_provider:getBatch() -- copy to ttype self.input,self.input0 = recursiveResizeAsCopyTyped(self.input,self.input0,ttype) @@ -85,6 +85,6 @@ function Trainer:train() table.insert(self.fx,err/maxIter) - self.module:evaluate() + --self.module:evaluate() self.epoch = self.epoch + 1 end From c9545ae87090d30a4f74265e045e1d2e95d3b6f6 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 20 Sep 2015 23:12:28 +0200 Subject: [PATCH 30/79] Reduce memory usage during detection for RCNN and SPP --- FRCNN.lua | 5 +++- ImageDetect.lua | 4 +-- RCNN.lua | 6 ++++- SPP.lua | 6 ++++- data.lua | 4 +-- test.lua | 13 --------- tests/test_imdetect.lua | 59 +++++++++++++++++++++++++++++++++++++++++ tests/test_utils.lua | 49 ++++++++++++++++++++++++++++++++++ 8 files changed, 125 insertions(+), 21 deletions(-) delete mode 100644 test.lua create mode 100644 tests/test_imdetect.lua create mode 100644 tests/test_utils.lua diff --git a/FRCNN.lua b/FRCNN.lua index 07900ef..06378e9 100644 --- a/FRCNN.lua +++ b/FRCNN.lua @@ -1,4 +1,5 @@ local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes +local recursiveResizeAsCopyTyped = paths.dofile('utils.lua').recursiveResizeAsCopyTyped local FRCNN = torch.class('nnf.FRCNN') local argcheck = require 'argcheck' @@ -167,6 +168,8 @@ function FRCNN:postProcess(im,boxes,output) end function FRCNN:compute(model, inputs) - return model:forward(inputs) + local ttype = model.output:type() + self.inputs,inputs = recursiveResizeAsCopyTyped(self.inputs,inputs,ttype) + return model:forward(self.inputs) end diff --git a/ImageDetect.lua b/ImageDetect.lua index e75d018..42d5c07 100644 --- a/ImageDetect.lua +++ b/ImageDetect.lua @@ -10,12 +10,10 @@ end -- supposes boxes is in [x1,y1,x2,y2] format function ImageDetect:detect(im,boxes) local feat_provider = self.feat_provider - local ttype = self.model.output:type() local inputs = feat_provider:getFeature(im,boxes) - self.inputs,inputs = recursiveResizeAsCopyTyped(self.inputs,inputs,ttype) - local output0 = feat_provider:compute(self.model, self.inputs) + local output0 = feat_provider:compute(self.model, inputs) local output = feat_provider:postProcess(im,boxes,output0) --self.sm:forward(output0) diff --git a/RCNN.lua b/RCNN.lua index bf1331d..f3e7bac 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -185,8 +185,12 @@ function RCNN:compute(model,inputs) self.output = self.output or inputs.new() + local ttype = 
model.output:type() + self.inputs = self.inputs or torch.Tensor():type(ttype) + for idx, f in ipairs(inputs_s) do - local output0 = model:forward(f) + self.inputs:resize(f:size()):copy(f) + local output0 = model:forward(self.inputs) local fs = f:size(1) if idx == 1 then local ss = output0[1]:size():totable() diff --git a/SPP.lua b/SPP.lua index 4af9636..313ad44 100644 --- a/SPP.lua +++ b/SPP.lua @@ -493,8 +493,12 @@ function SPP:compute(model,inputs) self.output = self.output or inputs.new() + local ttype = model.output:type() + self.inputs = self.inputs or torch.Tensor():type(ttype) + for idx, f in ipairs(inputs_s) do - local output0 = model:forward(f) + self.inputs:resize(f:size()):copy(f) + local output0 = model:forward(self.inputs) local fs = f:size(1) if idx == 1 then local ss = output0[1]:size():totable() diff --git a/data.lua b/data.lua index cb168e1..04569db 100644 --- a/data.lua +++ b/data.lua @@ -40,7 +40,7 @@ else datadir=opt.datadir,roidbdir=opt.roidbdir} - feat_provider = FP(ds_train) + feat_provider = FP(fp_params) batch_provider = BP(bp_params) batch_provider:setupData() @@ -59,7 +59,7 @@ else datadir=opt.datadir,roidbdir=opt.roidbdir} - feat_provider_test = FP(ds_test) + feat_provider_test = FP(fp_params) -- disable flip ? bp_params.do_flip = false batch_provider_test = BP(bp_params) diff --git a/test.lua b/test.lua deleted file mode 100644 index 6057a07..0000000 --- a/test.lua +++ /dev/null @@ -1,13 +0,0 @@ -require 'nn' -nnf = {} -dofile 'ROIPooling.lua' - -m = nnf.ROIPooling(3,3) - -t = {torch.rand(1,10,10),torch.Tensor({{1,1,5,5},{2,3,7,8},{6,4,8,8},{6,4,10,10},{8,8,10,10}})} -- -g = torch.rand(t[2]:size(1),1,3,3) - -o = m:forward(t) -gg = m:backward(t,g) - - diff --git a/tests/test_imdetect.lua b/tests/test_imdetect.lua new file mode 100644 index 0000000..9884cd7 --- /dev/null +++ b/tests/test_imdetect.lua @@ -0,0 +1,59 @@ +dofile 'test_utils.lua' + +detect1 = nnf.ImageDetect(model1,fp1) +detect = nnf.ImageDetect(model,fp2) + + +-------------------------------------------------------------------------------- +-- define batch providers +-------------------------------------------------------------------------------- + +bp1 = nnf.BatchProvider{dataset=ds,feat_provider=fp1} +bp1.nTimesMoreData = 2 +bp1.iter_per_batch = 10 +bp2 = nnf.BatchProviderROI{dataset=ds,feat_provider=fp2} + +bp1.bboxes = torch.load('tests/bproibox.t7') +bp2.bboxes = torch.load('tests/bproibox.t7') + +print('test1') +b,t = bp1:getBatch() +print('test2') +b,t = bp2:getBatch() + +-- mixing does not work for the moment, as FRCNN accepts a set of images as input +-- whereas RCNN and SPP supposes that only one image is provided at a time +--[[ +bp3 = nnf.BatchProviderROI(ds) +bp3.bboxes = torch.load('tests/bproibox.t7') +bp3.feat_provider = fp1 +print('test3') +b,t = bp3:getBatch() +--]] +-------------------------------------------------------------------------------- +-- +-------------------------------------------------------------------------------- + +idx = 100 +im = ds:getImage(idx) +boxes = ds:getROIBoxes(idx) + +--output = detect1:detect(im,boxes) +--output0 = detect:detect(im,boxes) + +-------------------------------------------------------------------------------- +-- compare old and new SPP implementations for the cropping +-------------------------------------------------------------------------------- +--[[ +output_old = {} +for i=1,boxes:size(1) do + tt0 = fp3:getCrop_old(im,boxes[i]) + output_old[i] = tt0 +end + +output_new = fp3:getCrop(im,boxes) --[881] + +for i=1,boxes:size(1) do + 
assert(output_old[i]:eq(output_new[i]):all(),'error '..i) +end +--]] diff --git a/tests/test_utils.lua b/tests/test_utils.lua new file mode 100644 index 0000000..f2e79e8 --- /dev/null +++ b/tests/test_utils.lua @@ -0,0 +1,49 @@ +require 'nnf' +require 'nn' + +function getDS() + local dt = torch.load('pascal_2007_train.t7') + local ds = nnf.DataSetPascal{image_set='train', + datadir='/home/francisco/work/datasets/VOCdevkit', + roidbdir='/home/francisco/work/datasets/rcnn/selective_search_data' + } + ds.roidb = dt.roidb + return ds +end + +function getModel() + local features = nn.Sequential() + features:add(nn.SpatialConvolutionMM(3,16,11,11,16,16,5,5)) + local classifier = nn.Sequential() + classifier:add(nn.Linear(7*7*16,21)) + local model1 = nn.Sequential() + model1:add(features) + model1:add(nn.SpatialMaxPooling(2,2,2,2)) + model1:add(nn.View(-1):setNumInputDims(3)) + model1:add(classifier) + local model = nn.Sequential() + local prl = nn.ParallelTable() + prl:add(features) + prl:add(nn.Identity()) + model:add(prl) + model:add(nnf.ROIPooling(7,7):setSpatialScale(1/16)) + model:add(nn.View(-1):setNumInputDims(3)) + model:add(classifier) + return model1, model, features, classifier +end + +-------------------------------------------------------------------------------- +-- define dataset, models and feature providers +-------------------------------------------------------------------------------- + +ds = getDS() + +model1, model, features, classifier = getModel() + +fp1 = nnf.RCNN() +fp2 = nnf.FRCNN() +fp3 = nnf.SPP(features) +fp3.use_cache = false +fp3:evaluate() + + From 48037f57e367134f401d077b647799d1a110c0b8 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Mon, 21 Sep 2015 07:39:50 +0200 Subject: [PATCH 31/79] ImageDetect also outputs bbox, enabling implementing bbox regression --- FRCNN.lua | 2 +- ImageDetect.lua | 4 ++-- RCNN.lua | 2 +- SPP.lua | 2 +- Tester_FRCNN.lua | 3 ++- 5 files changed, 7 insertions(+), 6 deletions(-) diff --git a/FRCNN.lua b/FRCNN.lua index 06378e9..efc48ed 100644 --- a/FRCNN.lua +++ b/FRCNN.lua @@ -164,7 +164,7 @@ end -- do the bbox regression function FRCNN:postProcess(im,boxes,output) -- not implemented yet - return output + return output,boxes end function FRCNN:compute(model, inputs) diff --git a/ImageDetect.lua b/ImageDetect.lua index 42d5c07..d3140df 100644 --- a/ImageDetect.lua +++ b/ImageDetect.lua @@ -14,9 +14,9 @@ function ImageDetect:detect(im,boxes) local inputs = feat_provider:getFeature(im,boxes) local output0 = feat_provider:compute(self.model, inputs) - local output = feat_provider:postProcess(im,boxes,output0) + local output,boxes_p = feat_provider:postProcess(im,boxes,output0) --self.sm:forward(output0) self.output,output = recursiveResizeAsCopyTyped(self.output,output,'torch.FloatTensor') - return self.output + return self.output,boxes_p end diff --git a/RCNN.lua b/RCNN.lua index f3e7bac..eb83118 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -177,7 +177,7 @@ end -- don't do anything. could be the bbox regression or SVM, but I won't add it here function RCNN:postProcess(im,bbox,output) - return output + return output,bbox end function RCNN:compute(model,inputs) diff --git a/SPP.lua b/SPP.lua index 313ad44..8b5234f 100644 --- a/SPP.lua +++ b/SPP.lua @@ -485,7 +485,7 @@ end -- don't do anything. 
could be the bbox regression or SVM, but I won't add it here function SPP:postProcess(im,bbox,output) - return output + return output,bbox end function SPP:compute(model,inputs) diff --git a/Tester_FRCNN.lua b/Tester_FRCNN.lua index 34f0086..57fd41f 100644 --- a/Tester_FRCNN.lua +++ b/Tester_FRCNN.lua @@ -68,6 +68,7 @@ function Tester:test(iteration) local detec = nnf.ImageDetect(module, feat_provider) local boxes local im + local output local aboxes = {} for i=1,dataset.num_classes do @@ -89,7 +90,7 @@ function Tester:test(iteration) boxes = dataset:getROIBoxes(i):float() im = dataset:getImage(i) timer3:reset() - local output = detec:detect(im,boxes) + output,boxes = detec:detect(im,boxes) local add_bg = 1 local tt = 0 From 5c289d1f459db570fa52e07cf622b536518ac7e4 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Mon, 21 Sep 2015 08:10:46 +0200 Subject: [PATCH 32/79] Add simple tests for training --- Trainer.lua | 8 ++++---- nnf.lua | 5 +++-- tests/test_train.lua | 26 ++++++++++++++++++++++++++ 3 files changed, 33 insertions(+), 6 deletions(-) create mode 100644 tests/test_train.lua diff --git a/Trainer.lua b/Trainer.lua index cca5478..1828584 100644 --- a/Trainer.lua +++ b/Trainer.lua @@ -25,8 +25,10 @@ function Trainer:__init(module,criterion,batch_provider) end -function Trainer:train() - +function Trainer:train(maxIter) + local maxIter = maxIter or 20 + local ttype = self.parameters:type() + self.module:training() local module = self.module @@ -37,8 +39,6 @@ function Trainer:train() local criterion = self.criterion local optimState = self.optimState - --local maxIter = inputs:size(1) - if self.confusion then self.confusion:zero() end diff --git a/nnf.lua b/nnf.lua index 0110bf4..d43cef9 100644 --- a/nnf.lua +++ b/nnf.lua @@ -5,12 +5,14 @@ require 'xlua' nnf = {} +torch.include('nnf','ImageTransformer.lua') + torch.include('nnf','DataSetPascal.lua') torch.include('nnf','BatchProviderBase.lua') torch.include('nnf','BatchProvider.lua') torch.include('nnf','BatchProviderROI.lua') ---torch.include('nnf','SPP.lua') +torch.include('nnf','SPP.lua') torch.include('nnf','RCNN.lua') torch.include('nnf','FRCNN.lua') torch.include('nnf','ROIPooling.lua') @@ -21,6 +23,5 @@ torch.include('nnf','Tester_FRCNN.lua') torch.include('nnf','SVMTrainer.lua') -torch.include('nnf','ImageTransformer.lua') torch.include('nnf','ImageDetect.lua') --return nnf diff --git a/tests/test_train.lua b/tests/test_train.lua new file mode 100644 index 0000000..7f50819 --- /dev/null +++ b/tests/test_train.lua @@ -0,0 +1,26 @@ +dofile 'tests/test_utils.lua' + +-------------------------------------------------------------------------------- +-- define batch providers +-------------------------------------------------------------------------------- + +bp1 = nnf.BatchProvider{dataset=ds,feat_provider=fp1} +bp1.nTimesMoreData = 2 +bp1.iter_per_batch = 10 +bp2 = nnf.BatchProviderROI{dataset=ds,feat_provider=fp2} + +bp1.bboxes = torch.load('tests/bproibox.t7') +bp2.bboxes = torch.load('tests/bproibox.t7') + +-------------------------------------------------------------------------------- +-- +-------------------------------------------------------------------------------- + +criterion = nn.CrossEntropyCriterion() + +trainer = nnf.Trainer(model1,criterion,bp1) + +for i=1,10 do + trainer:train(10) +end + From 20a3b8e5d50174cc7ea925559645c69ecfd38ce4 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Mon, 21 Sep 2015 08:32:51 +0200 Subject: [PATCH 33/79] Basic test comprising training and testing for FRCNN --- FRCNN.lua | 2 +- 
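PATCH 31 above only threads the unmodified boxes through postProcess; as its commit message notes, that is the hook where bounding-box regression can later be implemented. For orientation, a decoder for the usual (dx,dy,dw,dh) parametrization that such a postProcess could host might look roughly like this. This is a hypothetical sketch, not code from this repository:

    -- boxes: Nx4 [x1,y1,x2,y2]; deltas: Nx4 [dx,dy,dw,dh] (per-class slices omitted)
    local function decodeBBox(boxes, deltas)
       local w  = boxes[{{},3}] - boxes[{{},1}] + 1
       local h  = boxes[{{},4}] - boxes[{{},2}] + 1
       local cx = boxes[{{},1}] + w*0.5
       local cy = boxes[{{},2}] + h*0.5
       local pcx = torch.cmul(deltas[{{},1}], w) + cx
       local pcy = torch.cmul(deltas[{{},2}], h) + cy
       local pw  = torch.cmul(torch.exp(deltas[{{},3}]), w)
       local ph  = torch.cmul(torch.exp(deltas[{{},4}]), h)
       local out = boxes.new():resizeAs(boxes)
       out[{{},1}] = pcx - pw*0.5
       out[{{},2}] = pcy - ph*0.5
       out[{{},3}] = pcx + pw*0.5
       out[{{},4}] = pcy + ph*0.5
       return out
    end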
tests/test_full_frcnn.lua | 84 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 1 deletion(-) create mode 100644 tests/test_full_frcnn.lua diff --git a/FRCNN.lua b/FRCNN.lua index efc48ed..59e682a 100644 --- a/FRCNN.lua +++ b/FRCNN.lua @@ -168,7 +168,7 @@ function FRCNN:postProcess(im,boxes,output) end function FRCNN:compute(model, inputs) - local ttype = model.output:type() + local ttype = model.output:type() -- fix when doing bbox regression self.inputs,inputs = recursiveResizeAsCopyTyped(self.inputs,inputs,ttype) return model:forward(self.inputs) end diff --git a/tests/test_full_frcnn.lua b/tests/test_full_frcnn.lua new file mode 100644 index 0000000..ee23707 --- /dev/null +++ b/tests/test_full_frcnn.lua @@ -0,0 +1,84 @@ +require 'nnf' +require 'loadcaffe' + +ds = nnf.DataSetPascal{image_set='trainval', + datadir='datasets/VOCdevkit', + roidbdir='data/selective_search_data' + } +local image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, + raw_scale = 255, + swap = {3,2,1}} + +fp = nnf.FRCNN{image_transformer=image_transformer} +fp:training() +-------------------------------------------------------------------------------- +-- define batch providers +-------------------------------------------------------------------------------- + +bp = nnf.BatchProviderROI{dataset=ds,feat_provider=fp, + bg_threshold={0.1,0.5} + } +bp:setupData() + +-------------------------------------------------------------------------------- +-- define model +-------------------------------------------------------------------------------- +model = nn.Sequential() +do + local rcnnfold = '/home/francisco/work/libraries/caffe/examples/imagenet/' + local base_model = loadcaffe.load( + rcnnfold..'imagenet_deploy.prototxt', + rcnnfold..'caffe_reference_imagenet_model', + 'cudnn') + + for i=1,14 do + features:add(base_model:get(i):clone()) + end + for i=17,22 do + classifier:add(base_model:get(i):clone()) + end + classifier:add(nn.Linear(4096,21):cuda()) + + collectgarbage() + local prl = nn.ParallelTable() + prl:add(features) + prl:add(nn.Identity()) + model:add(prl) + --model:add(nnf.ROIPooling(6,6):setSpatialScale(1/16)) + model:add(inn.ROIPooling(6,6):setSpatialScale(1/16)) + model:add(nn.View(-1):setNumInputDims(3)) + model:add(classifier) + +end + +-------------------------------------------------------------------------------- +-- train +-------------------------------------------------------------------------------- + +criterion = nn.CrossEntropyCriterion() + +trainer = nnf.Trainer(model,criterion,bp) + +for i=1,10 do + trainer:train(10) +end + +-------------------------------------------------------------------------------- +-- evaluate +-------------------------------------------------------------------------------- + +-- add softmax to classfier +model:add(nn.SoftMax()) + +dsv = nnf.DataSetPascal{image_set='test', + datadir='datasets/VOCdevkit', + roidbdir='data/selective_search_data' + } + + +fpv = nnf.FRCNN{image_transformer=image_transformer} +fpv:evaluate() + +tester = nnf.Tester_FRCNN(model,fpv,dsv) +tester.cachefolder = 'cachedir/'..exp_name +tester:test(num_iter) From f932bd0ee9fbf1f088276e9b92a0b09ad7c5a3a9 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Wed, 23 Sep 2015 22:42:29 +0200 Subject: [PATCH 34/79] Fix super constructors in BatchProviders. 
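The one-line change below fixes a classic Lua OO pitfall: parent:__init() is sugar for parent.__init(parent), so the shared parent class table was being initialized instead of the object under construction. A self-contained illustration with hypothetical class names:

    require 'torch'
    local A = torch.class('A')
    function A:__init() self.x = 1 end
    local B, parentcls = torch.class('B', 'A')
    function B:__init()
       parentcls.__init(self)   -- correct: x is set on the new B instance
       -- parentcls:__init()    -- wrong: x would land on the class table,
       --                       -- shared by every instance of B
    end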
Thanks @0wu --- BatchProvider.lua | 2 +- BatchProviderROI.lua | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/BatchProvider.lua b/BatchProvider.lua index 73aec33..8e7650d 100644 --- a/BatchProvider.lua +++ b/BatchProvider.lua @@ -52,7 +52,7 @@ local initcheck = argcheck{ } function BatchProvider:__init(...) - parent:__init() + parent.__init(self) local opts = initcheck(...) for k,v in pairs(opts) do self[k] = v end diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index 7c5850b..0cbee39 100644 --- a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -43,7 +43,7 @@ local initcheck = argcheck{ } function BatchProviderROI:__init(...) - parent:__init() + parent.__init(self) local opts = initcheck(...) for k,v in pairs(opts) do self[k] = v end From 295724f7fd57a675a753cd0dc4e75b4ed632b972 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Wed, 23 Sep 2015 23:23:05 +0200 Subject: [PATCH 35/79] Started cleaning a bit DataSetPascal --- DataSetPascal.lua | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/DataSetPascal.lua b/DataSetPascal.lua index 365f93f..2f56817 100644 --- a/DataSetPascal.lua +++ b/DataSetPascal.lua @@ -319,16 +319,11 @@ function DataSetPascal:attachProposals(i) local gt_boxes,gt_classes,valid_objects,anno = self:getGTBoxes(i) local all_boxes - if anno.object then - if #valid_objects > 0 and boxes:dim() > 0 then - all_boxes = torch.cat(gt_boxes,boxes,1) - elseif boxes:dim() == 0 then - all_boxes = gt_boxes - else - all_boxes = boxes - end + if gt_boxes:dim() > 0 and boxes:dim() > 0 then + all_boxes = torch.cat(gt_boxes,boxes,1) + elseif boxes:dim() == 0 then + all_boxes = gt_boxes else - gt_boxes = torch.IntTensor(0,4) all_boxes = boxes end @@ -353,7 +348,7 @@ function DataSetPascal:attachProposals(i) local o = boxoverlap(all_boxes,gt_boxes[idx]) local tmp = rec.overlap_class[{{},gt_classes[idx]}] -- pointer copy tmp[tmp:lt(o)] = o[tmp:lt(o)] - rec.overlap[{{},idx}] = boxoverlap(all_boxes,gt_boxes[idx]) + rec.overlap[{{},idx}] = o end -- get max class overlap --rec.overlap,rec.label = rec.overlap:max(2) From 1ac5175583fb8778e818d5ab9b479bde53856541 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Thu, 24 Sep 2015 10:47:47 +0200 Subject: [PATCH 36/79] Add utility concat, allows concatenating void tensors --- DataSetPascal.lua | 36 +++++++----------------------------- utils.lua | 14 ++++++++++++++ 2 files changed, 21 insertions(+), 29 deletions(-) diff --git a/DataSetPascal.lua b/DataSetPascal.lua index 2f56817..627e13e 100644 --- a/DataSetPascal.lua +++ b/DataSetPascal.lua @@ -1,6 +1,7 @@ local matio = require 'matio' local argcheck = require 'argcheck' local xml = require 'xml' +local concat = paths.dofile('utils.lua').concat matio.use_lua_strings = true @@ -318,29 +319,14 @@ function DataSetPascal:attachProposals(i) local boxes = self:getROIBoxes(i) local gt_boxes,gt_classes,valid_objects,anno = self:getGTBoxes(i) - local all_boxes - if gt_boxes:dim() > 0 and boxes:dim() > 0 then - all_boxes = torch.cat(gt_boxes,boxes,1) - elseif boxes:dim() == 0 then - all_boxes = gt_boxes - else - all_boxes = boxes - end + local all_boxes = concat(gt_boxes,boxes,1) local num_boxes = boxes:dim() > 0 and boxes:size(1) or 0 local num_gt_boxes = #gt_classes local rec = {} - if num_gt_boxes > 0 and num_boxes > 0 then - rec.gt = torch.cat(torch.ByteTensor(num_gt_boxes):fill(1), - torch.ByteTensor(num_boxes):fill(0) ) - elseif num_boxes > 0 then - rec.gt = torch.ByteTensor(num_boxes):fill(0) - elseif num_gt_boxes > 0 then - rec.gt = 
torch.ByteTensor(num_gt_boxes):fill(1) - else - rec.gt = torch.ByteTensor(0) - end + rec.gt = concat(torch.ByteTensor(num_gt_boxes):fill(1), + torch.ByteTensor(num_boxes):fill(0) ) rec.overlap_class = torch.FloatTensor(num_boxes+num_gt_boxes,self.num_classes):fill(0) rec.overlap = torch.FloatTensor(num_boxes+num_gt_boxes,num_gt_boxes):fill(0) @@ -374,17 +360,9 @@ function DataSetPascal:attachProposals(i) end rec.boxes = all_boxes - if num_gt_boxes > 0 and num_boxes > 0 then - rec.class = torch.cat(torch.CharTensor(gt_classes), - torch.CharTensor(num_boxes):fill(0)) - elseif num_boxes > 0 then - rec.class = torch.CharTensor(num_boxes):fill(0) - elseif num_gt_boxes > 0 then - rec.class = torch.CharTensor(gt_classes) - else - rec.class = torch.CharTensor(0) - end - + rec.class = concat(torch.CharTensor(gt_classes), + torch.CharTensor(num_boxes):fill(0)) + if self.save_objs then rec.objects = {} for _,idx in pairs(valid_objects) do diff --git a/utils.lua b/utils.lua index aa7e54a..506cc83 100644 --- a/utils.lua +++ b/utils.lua @@ -46,6 +46,19 @@ local function recursiveResizeAsCopyTyped(t1,t2,type) return t1, t2 end +local function concat(t1,t2,dim) + local out + assert(t1:type() == t2:type(),'tensors should have the same type') + if t1:dim() > 0 and t2:dim() > 0 then + out = torch.cat(t1,t2,dim) + elseif t1:dim() > 0 then + out = t1:clone() + else + out = t2:clone() + end + return out +end + -- modify bbox input local function flipBoundingBoxes(bbox, im_width) if bbox:dim() == 1 then @@ -299,6 +312,7 @@ utils.reshapeLastLinearLayer = reshapeLastLinearLayer utils.sanitize = sanitize utils.recursiveResizeAsCopyTyped = recursiveResizeAsCopyTyped utils.flipBoundingBoxes = flipBoundingBoxes +utils.concat = concat return utils From b9e5bdb8451fba589b493a49aa719f120c882429 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Fri, 25 Sep 2015 08:41:03 +0200 Subject: [PATCH 37/79] Avoid memory explosion in RCNN/SPP. 
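A usage note on the concat utility from PATCH 36 above: it exists because torch.cat errors out on empty ("void") tensors, which arise whenever an image has no ground-truth boxes or no proposals. A small sketch (run from the repository root; the tensors are illustrative):

    require 'torch'
    require 'paths'
    local concat = paths.dofile('utils.lua').concat
    local gt    = torch.FloatTensor()            -- void: an image without GT boxes
    local props = torch.FloatTensor(3,4):fill(1) -- three proposal boxes
    local all   = concat(gt, props, 1)           -- falls back to props:clone()
    print(all:size(1))                           -- 3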
Loads images as float --- BatchProvider.lua | 6 ++++-- DataSetPascal.lua | 2 +- RCNN.lua | 12 ++++++------ 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/BatchProvider.lua b/BatchProvider.lua index 8e7650d..6fb2631 100644 --- a/BatchProvider.lua +++ b/BatchProvider.lua @@ -169,8 +169,8 @@ function BatchProvider:prepareFeatures(im_idx,bboxes,fg_label,bg_label) -- compute the features local feats = self.feat_provider:getFeature(im_idx,s_boxes,flip) - local fg_data = feats:narrow(1,1,num_pos) - local bg_data = feats:narrow(1,num_pos+1,num_neg) + local fg_data = num_pos > 0 and feats:narrow(1,1,num_pos) or nil + local bg_data = num_neg > 0 and feats:narrow(1,num_pos+1,num_neg) or nil return fg_data, bg_data end @@ -244,7 +244,9 @@ function BatchProvider:prepareBatch(batches,targets) batches[b][s]:copy(fg_data[j]) targets[b][s]:copy(fg_label[j]) end + collectgarbage() end + collectgarbage() return batches,targets end diff --git a/DataSetPascal.lua b/DataSetPascal.lua index 627e13e..7c4f591 100644 --- a/DataSetPascal.lua +++ b/DataSetPascal.lua @@ -168,7 +168,7 @@ function DataSetPascal:size() end function DataSetPascal:getImage(i) - return image.load(string.format(self.imgpath,self.img_ids[i])) + return image.load(string.format(self.imgpath,self.img_ids[i]),3,'float') end diff --git a/RCNN.lua b/RCNN.lua index eb83118..4765c04 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -118,17 +118,17 @@ function RCNN:getCrop(output,I,bbox) end -- padding > 0 || square ------ - --local patch = image.crop(I,bbox[1],bbox[2],bbox[3],bbox[4]); local patch = I[{{},{bbox[2],bbox[4]},{bbox[1],bbox[3]}}] - local tmp = image.scale(patch,crop_width,crop_height,'bilinear'); + self._crop = self._crop or torch.FloatTensor(3,self.crop_size,self.crop_size) + self._crop:resize(3,crop_height,crop_width) + image.scale(self._crop,patch,'bilinear'); + local tmp = self._crop if image_mean then - tmp = tmp - image_mean[{{},{pad_h+1,pad_h+crop_height}, - {pad_w+1,pad_w+crop_width}}] + tmp:add(-1,image_mean[{{},{pad_h+1,pad_h+crop_height}, + {pad_w+1,pad_w+crop_width}}]) end - --patch = torch.FloatTensor(3,crop_size,crop_size):zero() - output[{{},{pad_h+1,pad_h+crop_height}, {pad_w+1,pad_w+crop_width}}] = tmp return output From aabd91753dc3483e9e962774442a48b7c4117edb Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Fri, 25 Sep 2015 09:00:18 +0200 Subject: [PATCH 38/79] Fix concat when dim is not provided --- utils.lua | 1 + 1 file changed, 1 insertion(+) diff --git a/utils.lua b/utils.lua index 506cc83..785d2a3 100644 --- a/utils.lua +++ b/utils.lua @@ -50,6 +50,7 @@ local function concat(t1,t2,dim) local out assert(t1:type() == t2:type(),'tensors should have the same type') if t1:dim() > 0 and t2:dim() > 0 then + dim = dim or t1:dim() out = torch.cat(t1,t2,dim) elseif t1:dim() > 0 then out = t1:clone() From 1bb94eee200176855a23db3aab3373586c084cc3 Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Fri, 25 Sep 2015 20:43:07 +0200 Subject: [PATCH 39/79] Fixing ambiguous constructor in FRCNN and upvalue function in Tester. 
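"Ambiguous" refers to argcheck's calling convention: without noordered=true, a call such as nnf.FRCNN{...} can be read either as named options or as an ordered call whose first argument is the scale table, and the wrong interpretation may be picked. The fix below restricts these constructors to named arguments only; in use:

    local fp = nnf.FRCNN{scale={600}, max_size=1000}  -- ok: named arguments
    -- nnf.FRCNN({600})  -- ordered calls are now rejected, which removes
    --                   -- the ambiguity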
Fix tests --- FRCNN.lua | 1 + Tester_FRCNN.lua | 48 +++++++-------- tests/test_full_frcnn.lua | 63 +++++++++++++++----- tests/test_full_rcnn.lua | 120 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 194 insertions(+), 38 deletions(-) create mode 100644 tests/test_full_rcnn.lua diff --git a/FRCNN.lua b/FRCNN.lua index 59e682a..d6292f5 100644 --- a/FRCNN.lua +++ b/FRCNN.lua @@ -5,6 +5,7 @@ local FRCNN = torch.class('nnf.FRCNN') local argcheck = require 'argcheck' local initcheck = argcheck{ pack=true, + noordered=true, {name="scale", type="table", default={600}, diff --git a/Tester_FRCNN.lua b/Tester_FRCNN.lua index 57fd41f..3ab1469 100644 --- a/Tester_FRCNN.lua +++ b/Tester_FRCNN.lua @@ -55,6 +55,29 @@ function Tester:validate(criterion) return err/num_batches end +local function print_scores(dataset,res) + print('Results:') + -- print class names + io.write('|') + for i = 1, dataset.num_classes do + io.write(('%5s|'):format(dataset.classes[i])) + end + io.write('\n|') + -- print class scores + for i = 1, dataset.num_classes do + local l = #dataset.classes[i] < 5 and 5 or #dataset.classes[i] + local l = res[i] == res[i] and l-5 or l-3 + if l > 0 then + io.write(('%.3f%'..l..'s|'):format(res[i],' ')) + else + io.write(('%.3f|'):format(res[i])) + end + end + io.write('\n') + io.write(('mAP: %.4f\n'):format(res:mean(1)[1])) +end + + function Tester:test(iteration) local dataset = self.dataset @@ -77,7 +100,7 @@ function Tester:test(iteration) local max_per_set = 5*dataset:size() local max_per_image = 100 - local thresh = torch.ones(dataset.num_classes):mul(-1.5) + local thresh = torch.ones(dataset.num_classes):mul(0.05) local scored_boxes = torch.FloatTensor() local timer = torch.Timer() @@ -168,26 +191,3 @@ function Tester:test(iteration) return res end -local function print_scores(dataset,res) - print('Results:') - -- print class names - io.write('|') - for i = 1, dataset.num_classes do - io.write(('%5s|'):format(dataset.classes[i])) - end - io.write('\n|') - -- print class scores - for i = 1, dataset.num_classes do - local l = #dataset.classes[i] < 5 and 5 or #dataset.classes[i] - local l = res[i] == res[i] and l-5 or l-3 - if l > 0 then - io.write(('%.3f%'..l..'s|'):format(res[i],' ')) - else - io.write(('%.3f|'):format(res[i])) - end - end - io.write('\n') - io.write(('mAP: %.4f\n'):format(res:mean(1)[1])) -end - - diff --git a/tests/test_full_frcnn.lua b/tests/test_full_frcnn.lua index ee23707..b13f749 100644 --- a/tests/test_full_frcnn.lua +++ b/tests/test_full_frcnn.lua @@ -1,6 +1,10 @@ require 'nnf' +require 'inn' +require 'cudnn' require 'loadcaffe' +cutorch.setDevice(2) + ds = nnf.DataSetPascal{image_set='trainval', datadir='datasets/VOCdevkit', roidbdir='data/selective_search_data' @@ -18,27 +22,49 @@ fp:training() bp = nnf.BatchProviderROI{dataset=ds,feat_provider=fp, bg_threshold={0.1,0.5} } -bp:setupData() +--bp:setupData() -------------------------------------------------------------------------------- -- define model -------------------------------------------------------------------------------- model = nn.Sequential() do - local rcnnfold = '/home/francisco/work/libraries/caffe/examples/imagenet/' + --[[ + local rcnnfold = '/home/francisco/work/projects/object-detection.torch/data/models/imagenet_models/' local base_model = loadcaffe.load( - rcnnfold..'imagenet_deploy.prototxt', - rcnnfold..'caffe_reference_imagenet_model', + rcnnfold..'CaffeNet_train.prototxt', + rcnnfold..'CaffeNet.v2.caffemodel', 'cudnn') - for i=1,14 do features:add(base_model:get(i):clone()) end 
for i=17,22 do classifier:add(base_model:get(i):clone()) end - classifier:add(nn.Linear(4096,21):cuda()) - + local linear = nn.Linear(4096,21):cuda() + linear.weight:normal(0,0.01) + linear.bias:zero() + classifier:add(linear) + --]] + local features = nn.Sequential() + local classifier = nn.Sequential() + local fold = 'data/models/imagenet_models/alexnet/' + local m1 = torch.load(fold..'features.t7') + local m2 = torch.load(fold..'top.t7') + for i=1,14 do + features:add(m1:get(i):clone()) + end + features:get(3).padW = 1 + features:get(3).padH = 1 + features:get(7).padW = 1 + features:get(7).padH = 1 + for i=2,7 do + classifier:add(m2:get(i):clone()) + end + local linear = nn.Linear(4096,21):cuda() + linear.weight:normal(0,0.01) + linear.bias:zero() + classifier:add(linear) collectgarbage() local prl = nn.ParallelTable() prl:add(features) @@ -48,19 +74,27 @@ do model:add(inn.ROIPooling(6,6):setSpatialScale(1/16)) model:add(nn.View(-1):setNumInputDims(3)) model:add(classifier) - end - +model:cuda() +model = nil +collectgarbage() +model = torch.load('test_model.t7') +model:cuda() +collectgarbage() -------------------------------------------------------------------------------- -- train -------------------------------------------------------------------------------- -criterion = nn.CrossEntropyCriterion() +criterion = nn.CrossEntropyCriterion():cuda() trainer = nnf.Trainer(model,criterion,bp) -for i=1,10 do - trainer:train(10) +for i=1,0 do + if i == 300 then + trainer.optimState.learningRate = trainer.optimState.learningRate/10 + end + xlua.progress(i,400) + trainer:train(100) end -------------------------------------------------------------------------------- @@ -68,7 +102,7 @@ end -------------------------------------------------------------------------------- -- add softmax to classfier -model:add(nn.SoftMax()) +model:add(nn.SoftMax():cuda()) dsv = nnf.DataSetPascal{image_set='test', datadir='datasets/VOCdevkit', @@ -78,7 +112,8 @@ dsv = nnf.DataSetPascal{image_set='test', fpv = nnf.FRCNN{image_transformer=image_transformer} fpv:evaluate() +exp_name = 'test1_frcnn' tester = nnf.Tester_FRCNN(model,fpv,dsv) tester.cachefolder = 'cachedir/'..exp_name -tester:test(num_iter) +tester:test(40000) diff --git a/tests/test_full_rcnn.lua b/tests/test_full_rcnn.lua new file mode 100644 index 0000000..ea3d8a6 --- /dev/null +++ b/tests/test_full_rcnn.lua @@ -0,0 +1,120 @@ +require 'nnf' +require 'inn' +require 'cudnn' +require 'loadcaffe' + +cutorch.setDevice(2) + +ds = nnf.DataSetPascal{image_set='trainval', + datadir='datasets/VOCdevkit', + roidbdir='data/selective_search_data' + } +local image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, + raw_scale = 255, + swap = {3,2,1}} + +fp = nnf.RCNN{image_transformer=image_transformer, + crop_size=224} +fp:training() +-------------------------------------------------------------------------------- +-- define batch providers +-------------------------------------------------------------------------------- + +bp = nnf.BatchProvider{dataset=ds,feat_provider=fp, + bg_threshold={0.0,0.5}, + nTimesMoreData=2, + iter_per_batch=100, + } +bp:setupData() + +-------------------------------------------------------------------------------- +-- define model +-------------------------------------------------------------------------------- +model = nn.Sequential() +do + --[[ + local rcnnfold = '/home/francisco/work/projects/object-detection.torch/data/models/imagenet_models/' + local base_model = loadcaffe.load( + 
rcnnfold..'CaffeNet_train.prototxt', + rcnnfold..'CaffeNet.v2.caffemodel', + 'cudnn') + for i=1,14 do + features:add(base_model:get(i):clone()) + end + for i=17,22 do + classifier:add(base_model:get(i):clone()) + end + local linear = nn.Linear(4096,21):cuda() + linear.weight:normal(0,0.01) + linear.bias:zero() + classifier:add(linear) + --]] + local features = nn.Sequential() + local classifier = nn.Sequential() + local fold = 'data/models/imagenet_models/alexnet/' + local m1 = torch.load(fold..'features.t7') + local m2 = torch.load(fold..'top.t7') + for i=1,14 do + features:add(m1:get(i):clone()) + end + features:get(3).padW = 1 + features:get(3).padH = 1 + features:get(7).padW = 1 + features:get(7).padH = 1 + for i=2,7 do + classifier:add(m2:get(i):clone()) + end + local linear = nn.Linear(4096,21):cuda() + linear.weight:normal(0,0.01) + linear.bias:zero() + classifier:add(linear) + collectgarbage() + --local prl = nn.ParallelTable() + --prl:add(features) + --prl:add(nn.Identity()) + --model:add(prl) + --model:add(nnf.ROIPooling(6,6):setSpatialScale(1/16)) + --model:add(inn.ROIPooling(6,6):setSpatialScale(1/16)) + model:add(features) + model:add(nn.SpatialAdaptiveMaxPooling(6,6)) + model:add(nn.View(-1):setNumInputDims(3)) + model:add(classifier) +end +model:cuda() +-------------------------------------------------------------------------------- +-- train +-------------------------------------------------------------------------------- + +criterion = nn.CrossEntropyCriterion():cuda() + +trainer = nnf.Trainer(model,criterion,bp) + +for i=1,400 do + if i == 300 then + trainer.optimState.learningRate = trainer.optimState.learningRate/10 + end + print(('Iteration %3d/%-3d'):format(i,400)) + trainer:train(100) +end + +-------------------------------------------------------------------------------- +-- evaluate +-------------------------------------------------------------------------------- + +-- add softmax to classfier +model:add(nn.SoftMax():cuda()) + +dsv = nnf.DataSetPascal{image_set='test', + datadir='datasets/VOCdevkit', + roidbdir='data/selective_search_data' + } + + +fpv = nnf.RCNN{image_transformer=image_transformer, + crop_size=224} +fpv:evaluate() +exp_name = 'test1_rcnn' + +tester = nnf.Tester_FRCNN(model,fpv,dsv) +tester.cachefolder = 'cachedir/'..exp_name +tester:test(40000) From 6f76215029fa9a046452b2e37a37fa20dfdcc93d Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Tue, 29 Sep 2015 19:15:52 +0200 Subject: [PATCH 40/79] Fix ambiguous call to constructor in RCNN --- RCNN.lua | 1 + 1 file changed, 1 insertion(+) diff --git a/RCNN.lua b/RCNN.lua index 4765c04..346f4db 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -4,6 +4,7 @@ local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes local argcheck = require 'argcheck' local initcheck = argcheck{ pack=true, + noordered=true, {name="crop_size", type="number", default=227, From 56d6b72d58c1841558fda767eea9de7a109224aa Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Wed, 30 Sep 2015 08:53:49 +0200 Subject: [PATCH 41/79] Fix problem with argcheck in SPP Create a new argcheck function with the same interface as the original one, but deals with any number of arguments --- SPP.lua | 16 ++++++------ argcheck.lua | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 81 insertions(+), 8 deletions(-) create mode 100644 argcheck.lua diff --git a/SPP.lua b/SPP.lua index 8b5234f..421dcf1 100644 --- a/SPP.lua +++ b/SPP.lua @@ -5,8 +5,8 @@ local SPP = torch.class('nnf.SPP') -- argcheck crashes with that many 
arguments, and using unordered -- doesn't seems practical ---[[ -local argcheck = require 'argcheck' +-- [[ +local argcheck = paths.dofile('argcheck.lua')--require 'argcheck' local initcheck = argcheck{ pack=true, {name="model", @@ -69,18 +69,18 @@ function SPP:__init(...) self.dataset = dataset self.model = model - --local opts = initcheck(...) - --for k,v in pairs(opts) do self[k] = v end + local opts = initcheck(...) + for k,v in pairs(opts) do self[k] = v end - self.num_feat_chns = 256 - self.pooling_scales = {{1,1},{2,2},{3,3},{6,6}} + --self.num_feat_chns = 256 + --self.pooling_scales = {{1,1},{2,2},{3,3},{6,6}} local pyr = torch.Tensor(self.pooling_scales):t() local pooled_size = pyr[1]:dot(pyr[2]) self.output_size = {self.num_feat_chns*pooled_size} --self.spp_pooler = inn.SpatialPyramidPooling(self.pooling_scales):float() - self.image_transformer = nnf.ImageTransformer{} --- [[ + --self.image_transformer = nnf.ImageTransformer{} +--[[ -- paper=864, their code=874 self.scales = {480,576,688,874,1200} -- 874 diff --git a/argcheck.lua b/argcheck.lua new file mode 100644 index 0000000..2ce4e3b --- /dev/null +++ b/argcheck.lua @@ -0,0 +1,73 @@ +local usage = require 'argcheck.usage' +local env = require 'argcheck.env' +-------------------------------------------------------------------------------- +-- Simple argument function with a similar interface to argcheck, but which +-- supports lots of default arguments for named rules. +-- Not as fast and elegant though. +-------------------------------------------------------------------------------- +local function argcheck(rules) + -- basic checks + assert(not (rules.noordered and rules.nonamed), 'rules must be at least ordered or named') + assert(rules.help == nil or type(rules.help) == 'string', 'rules help must be a string or nil') + assert(rules.doc == nil or type(rules.doc) == 'string', 'rules doc must be a string or nil') + assert(not rules.overload, 'rules overload not supported') + assert(not (rules.doc and rules.help), 'choose between doc or help, not both') + for _, rule in ipairs(rules) do + assert(rule.name, 'rule must have a name field') + assert(rule.type == nil or type(rule.type) == 'string', 'rule type must be a string or nil') + assert(rule.help == nil or type(rule.help) == 'string', 'rule help must be a string or nil') + assert(rule.doc == nil or type(rule.doc) == 'string', 'rule doc must be a string or nil') + assert(rule.check == nil or type(rule.check) == 'function', 'rule check must be a function or nil') + --assert(rule.defaulta == nil or type(rule.defaulta) == 'string', 'rule defaulta must be a string or nil') + --assert(rule.defaultf == nil or type(rule.defaultf) == 'function', 'rule defaultf must be a function or nil') + end + + if not (rules.pack == nil or rules.pack) then + error('pack need to be true') + end + if rules.nonamed then + error('only named arguments') + end + + local arginfo = {} + for k,v in ipairs(rules) do + arginfo[v.name] = k + end + + local function func(args) + + local iargs = {} + for _,rule in ipairs(rules) do + iargs[rule.name] = rule.default + if rule.default == nil and + args[rule.name] == nil and + rule.opt ~= true then + print(usage(rules)) + error('Missing argument: '..rule.name) + end + end + + for k,v in pairs(args) do + if not env.istype(v,rules[arginfo[k]].type) then + print(usage(rules)) + error('Wrong type: '.. 
k) + end + + if rules[arginfo[k]].check then + local c = rules[arginfo[k]].check(args[k]) + if not c then + print(usage(rules)) + error('check did not pass') + end + end + iargs[k] = args[k] + end + + return iargs + end + + return func + +end + +return argcheck From a99dcf4dbf4529acf4d69e486e43473219d10ef4 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Wed, 30 Sep 2015 09:12:24 +0200 Subject: [PATCH 42/79] Refactoring a bit more DataSetPascal --- DataSetPascal.lua | 61 ++++++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/DataSetPascal.lua b/DataSetPascal.lua index 7c4f591..2279a4f 100644 --- a/DataSetPascal.lua +++ b/DataSetPascal.lua @@ -1,5 +1,5 @@ local matio = require 'matio' -local argcheck = require 'argcheck' +local argcheck = dofile'argcheck.lua'--require 'argcheck' local xml = require 'xml' local concat = paths.dofile('utils.lua').concat @@ -60,6 +60,7 @@ local initcheck = argcheck{ if type(v) ~= 'string' then print('classes can only be of string input'); out = false + break end end return out @@ -310,6 +311,35 @@ function DataSetPascal:getGTBoxes(i) end +function DataSetPascal:bestOverlap(all_boxes, gt_boxes, gt_classes) + local num_total_boxes = all_boxes:size(1) + local num_gt_boxes = #gt_boxes + local overlap_class = torch.FloatTensor(num_total_boxes,self.num_classes):zero() + local overlap = torch.FloatTensor(num_total_boxes,num_gt_boxes):zero() + for idx=1,num_gt_boxes do + local o = boxoverlap(all_boxes,gt_boxes[idx]) + local tmp = overlap_class[{{},gt_classes[idx]}] -- pointer copy + tmp[tmp:lt(o)] = o[tmp:lt(o)] + overlap[{{},idx}] = o + end + -- get max class overlap + --rec.overlap,rec.label = rec.overlap:max(2) + --rec.overlap = torch.squeeze(rec.overlap,2) + --rec.label = torch.squeeze(rec.label,2) + --rec.label[rec.overlap:eq(0)] = 0 + local correspondance + if num_gt_boxes > 0 then + overlap,correspondance = overlap:max(2) + overlap = torch.squeeze(overlap,2) + correspondance = torch.squeeze(correspondance,2) + correspondance[overlap:eq(0)] = 0 + else + overlap = torch.FloatTensor(num_total_boxes):zero() + correspondance = torch.LongTensor(num_total_boxes):zero() + end + return overlap, correspondance, overlap_class +end + function DataSetPascal:attachProposals(i) if not self.roidb then @@ -328,34 +358,13 @@ function DataSetPascal:attachProposals(i) rec.gt = concat(torch.ByteTensor(num_gt_boxes):fill(1), torch.ByteTensor(num_boxes):fill(0) ) - rec.overlap_class = torch.FloatTensor(num_boxes+num_gt_boxes,self.num_classes):fill(0) - rec.overlap = torch.FloatTensor(num_boxes+num_gt_boxes,num_gt_boxes):fill(0) - for idx=1,num_gt_boxes do - local o = boxoverlap(all_boxes,gt_boxes[idx]) - local tmp = rec.overlap_class[{{},gt_classes[idx]}] -- pointer copy - tmp[tmp:lt(o)] = o[tmp:lt(o)] - rec.overlap[{{},idx}] = o - end - -- get max class overlap - --rec.overlap,rec.label = rec.overlap:max(2) - --rec.overlap = torch.squeeze(rec.overlap,2) - --rec.label = torch.squeeze(rec.label,2) - --rec.label[rec.overlap:eq(0)] = 0 - - if num_gt_boxes > 0 then - rec.overlap,rec.correspondance = rec.overlap:max(2) - rec.overlap = torch.squeeze(rec.overlap,2) - rec.correspondance = torch.squeeze(rec.correspondance,2) - rec.correspondance[rec.overlap:eq(0)] = 0 - else - rec.overlap = torch.FloatTensor(num_boxes+num_gt_boxes):fill(0) - rec.correspondance = torch.LongTensor(num_boxes+num_gt_boxes):fill(0) - end + rec.overlap, rec.correspondance, rec.overlap_class = + self:bestOverlap(all_boxes,gt_boxes,gt_classes) rec.label = 
torch.IntTensor(num_boxes+num_gt_boxes):fill(0) for idx=1,(num_boxes+num_gt_boxes) do local corr = rec.correspondance[idx] if corr > 0 then - rec.label[idx] = self.class_to_id[anno.object[valid_objects[corr] ].name] + rec.label[idx] = gt_classes[corr] end end @@ -368,8 +377,6 @@ function DataSetPascal:attachProposals(i) for _,idx in pairs(valid_objects) do table.insert(rec.objects,anno.object[idx]) end - else - rec.correspondance = nil end function rec:size() From f9b53a7772d924c37669c4f56715077a9a5d32b1 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Thu, 1 Oct 2015 09:44:44 +0200 Subject: [PATCH 43/79] Depreceating old Tester and renaming the new one --- Tester.lua | 130 +++++++++------------ Tester_FRCNN.lua => Tester_depreceated.lua | 130 ++++++++++++--------- 2 files changed, 130 insertions(+), 130 deletions(-) rename Tester_FRCNN.lua => Tester_depreceated.lua (71%) diff --git a/Tester.lua b/Tester.lua index 4c84ace..3ab1469 100644 --- a/Tester.lua +++ b/Tester.lua @@ -4,16 +4,13 @@ local nms = paths.dofile('nms.lua') local keep_top_k = utils.keep_top_k local VOCevaldet = utils.VOCevaldet -local Tester = torch.class('nnf.Tester') +local Tester = torch.class('nnf.Tester_FRCNN') -function Tester:__init(module,feat_provider) - self.dataset = feat_provider.dataset - self.module = module +function Tester:__init(module,feat_provider,dataset) + self.dataset = dataset self.feat_provider = feat_provider + self.module = module - self.feat_dim = {256*50} - self.max_batch_size = 4000 - self.cachefolder = nil self.cachename = nil self.suffix = '' @@ -58,30 +55,44 @@ function Tester:validate(criterion) return err/num_batches end +local function print_scores(dataset,res) + print('Results:') + -- print class names + io.write('|') + for i = 1, dataset.num_classes do + io.write(('%5s|'):format(dataset.classes[i])) + end + io.write('\n|') + -- print class scores + for i = 1, dataset.num_classes do + local l = #dataset.classes[i] < 5 and 5 or #dataset.classes[i] + local l = res[i] == res[i] and l-5 or l-3 + if l > 0 then + io.write(('%.3f%'..l..'s|'):format(res[i],' ')) + else + io.write(('%.3f|'):format(res[i])) + end + end + io.write('\n') + io.write(('mAP: %.4f\n'):format(res:mean(1)[1])) +end + + function Tester:test(iteration) local dataset = self.dataset local module = self.module local feat_provider = self.feat_provider - local pathfolder = paths.concat(self.cachefolder,'test_iter'..iteration) - paths.mkdir(pathfolder) - module:evaluate() + feat_provider:evaluate() dataset:loadROIDB() - local feats = torch.FloatTensor() - local feats_batched = {} - local feats_cuda = torch.CudaTensor() - - local output = torch.FloatTensor() - - local output_dim = module:get(module:size()) - - local softmax = nn.SoftMax():float() - + local detec = nnf.ImageDetect(module, feat_provider) local boxes - -- + local im + local output + local aboxes = {} for i=1,dataset.num_classes do table.insert(aboxes,{}) @@ -89,50 +100,31 @@ function Tester:test(iteration) local max_per_set = 5*dataset:size() local max_per_image = 100 - local thresh = torch.ones(dataset.num_classes):mul(-1.5) + local thresh = torch.ones(dataset.num_classes):mul(0.05) local scored_boxes = torch.FloatTensor() local timer = torch.Timer() local timer2 = torch.Timer() local timer3 = torch.Timer() - + for i=1,dataset:size() do timer:reset() io.write(('test: (%s) %5d/%-5d '):format(dataset.dataset_name,i,dataset:size())); boxes = dataset:getROIBoxes(i):float() - local num_boxes = boxes:size(1) - -- compute image feature maps - timer3:reset() - 
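Condensed, the evaluation loop of the new Tester amounts to: run ImageDetect over each image's proposals, then, per class, keep detections scoring above the 0.05 threshold set above and apply non-maximum suppression. In outline, assuming a trained model, a feature provider and a dataset as in the tests (a sketch of the flow, not a drop-in replacement):

    local nms = paths.dofile('nms.lua')
    local detec = nnf.ImageDetect(model, feat_provider)
    local scores, boxes = detec:detect(im, proposals)  -- scores: #boxes x 21
    for j = 1, dataset.num_classes do
       local s = scores:select(2, j+1)                 -- +1 skips background
       local keep = torch.range(1, s:numel()):long()[s:gt(0.05)]
       if keep:numel() > 0 then
          local cls_boxes  = boxes:index(1, keep)
          local cls_scores = s:index(1, keep)
          -- rows of [x1 y1 x2 y2 score] for the NMS routine
          local scored = torch.cat(cls_boxes, cls_scores:view(-1,1), 2)
          local picked = nms(scored, 0.3)              -- prune overlaps at 0.3 IoU
       end
    end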
feats:resize(num_boxes,unpack(self.feat_dim)) - for idx=1,num_boxes do - feats[idx] = feat_provider:getFeature(i,boxes[idx]) - end - local tt = timer3:time().real - -- compute classification scores - torch.split(feats_batched,feats,self.max_batch_size,1) + im = dataset:getImage(i) timer3:reset() - for idx,f in ipairs(feats_batched) do - local fs = f:size(1) - feats_cuda:resize(fs,unpack(self.feat_dim)):copy(f) - module:forward(feats_cuda) - if idx == 1 then - local out_size = module.output:size():totable() - table.remove(out_size,1) - output:resize(num_boxes,unpack(out_size)) - end - output:narrow(1,(idx-1)*self.max_batch_size+1,fs):copy(module.output) - end - local add_bg = 0 - if dataset.num_classes ~= output:size(2) then -- if there is no svm - output = softmax:forward(output) - add_bg = 1 - end - + output,boxes = detec:detect(im,boxes) + + local add_bg = 1 + local tt = 0 local tt2 = timer3:time().real timer2:reset() + -- do a NMS for each class, based on the scores from the classifier for j=1,dataset.num_classes do local scores = output:select(2,j+add_bg) + -- only select detections with a score greater than thresh + -- this avoid doing NMS on too many bboxes with low score local idx = torch.range(1,scores:numel()):long() local idx2 = scores:gt(thresh[j]) idx = idx[idx2] @@ -151,6 +143,7 @@ function Tester:test(iteration) aboxes[j][i] = torch.FloatTensor() end + -- remove low scoring boxes and update threshold if i%1000 == 0 then aboxes[j],thresh[j] = keep_top_k(aboxes[j],max_per_set) end @@ -158,10 +151,11 @@ function Tester:test(iteration) end io.write((' prepare feat time: %.3f, forward time: %.3f, select time: %.3fs, total time: %.3fs\n'):format(tt,tt2,timer2:time().real,timer:time().real)); - --collectgarbage() - --mattorch.save(paths.concat(pathfolder,dataset.img_ids[i]..'.mat'),output:double()) end + local pathfolder = paths.concat(self.cachefolder,'test_iter'..iteration) + paths.mkdir(pathfolder) + for i = 1,dataset.num_classes do -- go back through and prune out detections below the found threshold for j = 1,dataset:size() do @@ -174,10 +168,14 @@ function Tester:test(iteration) end end end - save_file = paths.concat(pathfolder, dataset.classes[i].. '_boxes_'.. - dataset.dataset_name..self.suffix) - torch.save(save_file, aboxes) + --save_file = paths.concat(pathfolder, dataset.classes[i].. '_boxes_'.. + -- dataset.dataset_name..self.suffix) + --torch.save(save_file, aboxes) end + save_file = paths.concat(pathfolder, 'boxes_'.. 
+ dataset.dataset_name..self.suffix) + torch.save(save_file, aboxes) + local res = {} for i=1,dataset.num_classes do @@ -185,27 +183,11 @@ function Tester:test(iteration) res[i] = VOCevaldet(dataset,aboxes[i],cls) end res = torch.Tensor(res) - print('Results:') - -- print class names - io.write('|') - for i = 1, dataset.num_classes do - io.write(('%5s|'):format(dataset.classes[i])) - end - io.write('\n|') - -- print class scores - for i = 1, dataset.num_classes do - local l = #dataset.classes[i] < 5 and 5 or #dataset.classes[i] - local l = res[i] == res[i] and l-5 or l-3 - if l > 0 then - io.write(('%.3f%'..l..'s|'):format(res[i],' ')) - else - io.write(('%.3f|'):format(res[i])) - end - end - io.write('\n') - io.write(('mAP: %.4f\n'):format(res:mean(1)[1])) + + print_scores(dataset,res) -- clean roidb to free memory dataset.roidb = nil return res end + diff --git a/Tester_FRCNN.lua b/Tester_depreceated.lua similarity index 71% rename from Tester_FRCNN.lua rename to Tester_depreceated.lua index 3ab1469..4c84ace 100644 --- a/Tester_FRCNN.lua +++ b/Tester_depreceated.lua @@ -4,13 +4,16 @@ local nms = paths.dofile('nms.lua') local keep_top_k = utils.keep_top_k local VOCevaldet = utils.VOCevaldet -local Tester = torch.class('nnf.Tester_FRCNN') +local Tester = torch.class('nnf.Tester') -function Tester:__init(module,feat_provider,dataset) - self.dataset = dataset - self.feat_provider = feat_provider +function Tester:__init(module,feat_provider) + self.dataset = feat_provider.dataset self.module = module + self.feat_provider = feat_provider + self.feat_dim = {256*50} + self.max_batch_size = 4000 + self.cachefolder = nil self.cachename = nil self.suffix = '' @@ -55,44 +58,30 @@ function Tester:validate(criterion) return err/num_batches end -local function print_scores(dataset,res) - print('Results:') - -- print class names - io.write('|') - for i = 1, dataset.num_classes do - io.write(('%5s|'):format(dataset.classes[i])) - end - io.write('\n|') - -- print class scores - for i = 1, dataset.num_classes do - local l = #dataset.classes[i] < 5 and 5 or #dataset.classes[i] - local l = res[i] == res[i] and l-5 or l-3 - if l > 0 then - io.write(('%.3f%'..l..'s|'):format(res[i],' ')) - else - io.write(('%.3f|'):format(res[i])) - end - end - io.write('\n') - io.write(('mAP: %.4f\n'):format(res:mean(1)[1])) -end - - function Tester:test(iteration) local dataset = self.dataset local module = self.module local feat_provider = self.feat_provider + local pathfolder = paths.concat(self.cachefolder,'test_iter'..iteration) + paths.mkdir(pathfolder) + module:evaluate() - feat_provider:evaluate() dataset:loadROIDB() - local detec = nnf.ImageDetect(module, feat_provider) + local feats = torch.FloatTensor() + local feats_batched = {} + local feats_cuda = torch.CudaTensor() + + local output = torch.FloatTensor() + + local output_dim = module:get(module:size()) + + local softmax = nn.SoftMax():float() + local boxes - local im - local output - + -- local aboxes = {} for i=1,dataset.num_classes do table.insert(aboxes,{}) @@ -100,31 +89,50 @@ function Tester:test(iteration) local max_per_set = 5*dataset:size() local max_per_image = 100 - local thresh = torch.ones(dataset.num_classes):mul(0.05) + local thresh = torch.ones(dataset.num_classes):mul(-1.5) local scored_boxes = torch.FloatTensor() local timer = torch.Timer() local timer2 = torch.Timer() local timer3 = torch.Timer() - + for i=1,dataset:size() do timer:reset() io.write(('test: (%s) %5d/%-5d '):format(dataset.dataset_name,i,dataset:size())); boxes = 
dataset:getROIBoxes(i):float() - im = dataset:getImage(i) + local num_boxes = boxes:size(1) + -- compute image feature maps timer3:reset() - output,boxes = detec:detect(im,boxes) - - local add_bg = 1 - local tt = 0 + feats:resize(num_boxes,unpack(self.feat_dim)) + for idx=1,num_boxes do + feats[idx] = feat_provider:getFeature(i,boxes[idx]) + end + local tt = timer3:time().real + -- compute classification scores + torch.split(feats_batched,feats,self.max_batch_size,1) + timer3:reset() + for idx,f in ipairs(feats_batched) do + local fs = f:size(1) + feats_cuda:resize(fs,unpack(self.feat_dim)):copy(f) + module:forward(feats_cuda) + if idx == 1 then + local out_size = module.output:size():totable() + table.remove(out_size,1) + output:resize(num_boxes,unpack(out_size)) + end + output:narrow(1,(idx-1)*self.max_batch_size+1,fs):copy(module.output) + end + local add_bg = 0 + if dataset.num_classes ~= output:size(2) then -- if there is no svm + output = softmax:forward(output) + add_bg = 1 + end + local tt2 = timer3:time().real timer2:reset() - -- do a NMS for each class, based on the scores from the classifier for j=1,dataset.num_classes do local scores = output:select(2,j+add_bg) - -- only select detections with a score greater than thresh - -- this avoid doing NMS on too many bboxes with low score local idx = torch.range(1,scores:numel()):long() local idx2 = scores:gt(thresh[j]) idx = idx[idx2] @@ -143,7 +151,6 @@ function Tester:test(iteration) aboxes[j][i] = torch.FloatTensor() end - -- remove low scoring boxes and update threshold if i%1000 == 0 then aboxes[j],thresh[j] = keep_top_k(aboxes[j],max_per_set) end @@ -151,11 +158,10 @@ function Tester:test(iteration) end io.write((' prepare feat time: %.3f, forward time: %.3f, select time: %.3fs, total time: %.3fs\n'):format(tt,tt2,timer2:time().real,timer:time().real)); + --collectgarbage() + --mattorch.save(paths.concat(pathfolder,dataset.img_ids[i]..'.mat'),output:double()) end - local pathfolder = paths.concat(self.cachefolder,'test_iter'..iteration) - paths.mkdir(pathfolder) - for i = 1,dataset.num_classes do -- go back through and prune out detections below the found threshold for j = 1,dataset:size() do @@ -168,14 +174,10 @@ function Tester:test(iteration) end end end - --save_file = paths.concat(pathfolder, dataset.classes[i].. '_boxes_'.. - -- dataset.dataset_name..self.suffix) - --torch.save(save_file, aboxes) + save_file = paths.concat(pathfolder, dataset.classes[i].. '_boxes_'.. + dataset.dataset_name..self.suffix) + torch.save(save_file, aboxes) end - save_file = paths.concat(pathfolder, 'boxes_'.. 
- dataset.dataset_name..self.suffix) - torch.save(save_file, aboxes) - local res = {} for i=1,dataset.num_classes do @@ -183,11 +185,27 @@ function Tester:test(iteration) res[i] = VOCevaldet(dataset,aboxes[i],cls) end res = torch.Tensor(res) - - print_scores(dataset,res) + print('Results:') + -- print class names + io.write('|') + for i = 1, dataset.num_classes do + io.write(('%5s|'):format(dataset.classes[i])) + end + io.write('\n|') + -- print class scores + for i = 1, dataset.num_classes do + local l = #dataset.classes[i] < 5 and 5 or #dataset.classes[i] + local l = res[i] == res[i] and l-5 or l-3 + if l > 0 then + io.write(('%.3f%'..l..'s|'):format(res[i],' ')) + else + io.write(('%.3f|'):format(res[i])) + end + end + io.write('\n') + io.write(('mAP: %.4f\n'):format(res:mean(1)[1])) -- clean roidb to free memory dataset.roidb = nil return res end - From 274d8ffe4a597307cb277469efcc30236fbea7bc Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Thu, 1 Oct 2015 11:35:17 +0200 Subject: [PATCH 44/79] Add visualization function --- tests/test_visualization.lua | 7 ++++++ visualize_detections.lua | 46 ++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 tests/test_visualization.lua create mode 100644 visualize_detections.lua diff --git a/tests/test_visualization.lua b/tests/test_visualization.lua new file mode 100644 index 0000000..b5d727a --- /dev/null +++ b/tests/test_visualization.lua @@ -0,0 +1,7 @@ +dofile 'tests/test_utils.lua' +I = ds:getImage(1) +boxes = ds:getROIBoxes(1) +scores = torch.rand(boxes:size(1),21) +dofile 'visualize_detections.lua' +visualize_detections(I,boxes,scores,0.9) + diff --git a/visualize_detections.lua b/visualize_detections.lua new file mode 100644 index 0000000..5d14bdc --- /dev/null +++ b/visualize_detections.lua @@ -0,0 +1,46 @@ +function visualize_detections(im,boxes,scores,thresh) + local ok = pcall(require,'qt') + if not ok then + error('You need to run visualize_detections using qlua') + end + require 'qttorch' + require 'qtwidget' + + -- select best scoring boxes without background + local max_score,idx = scores[{{},{2,-1}}]:max(2) + + local idx_thresh = max_score:gt(thresh) + max_score = max_score[idx_thresh] + idx = idx[idx_thresh] + + local r = torch.range(1,boxes:size(1)):long() + local rr = r[idx_thresh] + local boxes_thresh = boxes:index(1,rr) + + local num_boxes = boxes_thresh:size(1) + local widths = boxes_thresh[{{},3}] - boxes_thresh[{{},1}] + local heights = boxes_thresh[{{},4}] - boxes_thresh[{{},2}] + + local x,y = im:size(3),im:size(2) + local w = qtwidget.newwindow(x,y,"test") + local qtimg = qt.QImage.fromTensor(im) + w:image(0,0,x,y,qtimg) + local fontsize = 10 + + for i=1,num_boxes do + local x,y = boxes_thresh[{i,1}],boxes_thresh[{i,2}] + local width,height = widths[i], heights[i] + + -- add bbox + w:rectangle(x,y,width,height) + + -- add score + w:moveto(x,y+fontsize) + w:setcolor("red") + w:setfont(qt.QFont{serif=true,italic=true,size=fontsize,bold=true}) + w:show(string.format('%d: %.2f',idx[i],max_score[i])) + end + w:setcolor("red") + w:setlinewidth(2) + w:stroke() +end From d17c5008c64822150cb031004368652a28fcf6d7 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Thu, 1 Oct 2015 11:39:28 +0200 Subject: [PATCH 45/79] Fix bug with DataSetPascal and rename Tester class --- DataSetPascal.lua | 5 +++-- Tester.lua | 14 ++++++++++++-- nnf.lua | 2 +- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/DataSetPascal.lua b/DataSetPascal.lua index 2279a4f..a37b0b0 100644 --- 
a/DataSetPascal.lua +++ b/DataSetPascal.lua @@ -313,7 +313,7 @@ end function DataSetPascal:bestOverlap(all_boxes, gt_boxes, gt_classes) local num_total_boxes = all_boxes:size(1) - local num_gt_boxes = #gt_boxes + local num_gt_boxes = gt_boxes:dim() > 0 and gt_boxes:size(1) or 0 local overlap_class = torch.FloatTensor(num_total_boxes,self.num_classes):zero() local overlap = torch.FloatTensor(num_total_boxes,num_gt_boxes):zero() for idx=1,num_gt_boxes do @@ -392,10 +392,11 @@ function DataSetPascal:createROIs() end self.rois = {} for i=1,self.num_imgs do - xlua.progress(i,self.num_imgs) table.insert(self.rois,self:attachProposals(i)) if i%500 == 0 then + xlua.progress(i,self.num_imgs) collectgarbage() end end + xlua.progress(self.num_imgs,self.num_imgs) end diff --git a/Tester.lua b/Tester.lua index 3ab1469..5ff2bc1 100644 --- a/Tester.lua +++ b/Tester.lua @@ -4,7 +4,7 @@ local nms = paths.dofile('nms.lua') local keep_top_k = utils.keep_top_k local VOCevaldet = utils.VOCevaldet -local Tester = torch.class('nnf.Tester_FRCNN') +local Tester = torch.class('nnf.Tester') function Tester:__init(module,feat_provider,dataset) self.dataset = dataset @@ -107,11 +107,21 @@ function Tester:test(iteration) local timer2 = torch.Timer() local timer3 = torch.Timer() + -- SPP is more efficient if we cache the features. We treat it differently then + -- the other feature providers + local pass_index = torch.type(feat_provider) == 'nnf.SPP' and true or false + for i=1,dataset:size() do timer:reset() io.write(('test: (%s) %5d/%-5d '):format(dataset.dataset_name,i,dataset:size())); + + if pass_index then + im = i + else + im = dataset:getImage(i) + end boxes = dataset:getROIBoxes(i):float() - im = dataset:getImage(i) + timer3:reset() output,boxes = detec:detect(im,boxes) diff --git a/nnf.lua b/nnf.lua index d43cef9..c41ebfc 100644 --- a/nnf.lua +++ b/nnf.lua @@ -19,7 +19,7 @@ torch.include('nnf','ROIPooling.lua') torch.include('nnf','Trainer.lua') torch.include('nnf','Tester.lua') -torch.include('nnf','Tester_FRCNN.lua') +--torch.include('nnf','Tester_FRCNN.lua') torch.include('nnf','SVMTrainer.lua') From 569684bb4f21dc71a9f38603939f7f1251215360 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Thu, 1 Oct 2015 11:40:06 +0200 Subject: [PATCH 46/79] Adapt test utils to new constructors --- tests/test_utils.lua | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_utils.lua b/tests/test_utils.lua index f2e79e8..e3d20dc 100644 --- a/tests/test_utils.lua +++ b/tests/test_utils.lua @@ -40,9 +40,9 @@ ds = getDS() model1, model, features, classifier = getModel() -fp1 = nnf.RCNN() -fp2 = nnf.FRCNN() -fp3 = nnf.SPP(features) +fp1 = nnf.RCNN{} +fp2 = nnf.FRCNN{} +fp3 = nnf.SPP{model=features} fp3.use_cache = false fp3:evaluate() From 25c2bff56867d068b2ae51c0c808b4abce0f0986 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 4 Oct 2015 19:29:22 +0200 Subject: [PATCH 47/79] Add DataSetCOCO --- DataSetCOCO.lua | 155 +++++++++++++++++++++++++++++++++++++++++++ DataSetDetection.lua | 112 +++++++++++++++++++++++++++++++ DataSetPascal.lua | 111 ++----------------------------- nnf.lua | 3 + utils.lua | 2 +- 5 files changed, 276 insertions(+), 107 deletions(-) create mode 100644 DataSetCOCO.lua create mode 100644 DataSetDetection.lua diff --git a/DataSetCOCO.lua b/DataSetCOCO.lua new file mode 100644 index 0000000..6b2a2e0 --- /dev/null +++ b/DataSetCOCO.lua @@ -0,0 +1,155 @@ +--local json = require 'dkjson' + +local DataSetCOCO,parent = torch.class('nnf.DataSetCOCO', 'nnf.DataSetDetection') + 
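+-- Annotations here follow the layout of the official COCO JSON (loaded below
+-- from a torch-serialized file): a table with `images`, `annotations` and
+-- `categories` arrays, where each annotation carries `image_id`, `category_id`
+-- and a `bbox` stored as {x,y,width,height} with 0-based coordinates.
+-- getAnnotation() converts these to the 1-based {xmin,ymin,xmax,ymax} boxes
+-- used by the rest of the library.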
+function DataSetCOCO:__init(annFile) + self.image_set = nil + self.dataset_name = 'COCO' + + local timer = torch.Timer() + local localtimer = torch.Timer() + print('Preparing COCO dataset...') + --[[ + if type(annFile) == 'string' then + local f = io.open(annFile) + local str = f:read('*all') + f:close() + + self.data = json.decode(str) + + else + self.data = torch.load(annFile) + end + --]] + self.data = torch.load('coco_val.t7') + print((' Loaded annotations file in %.2fs'):format(localtimer:time().real)) + localtimer:reset() + + -- mapping images + local img_idx = {} + local img_idx_map = {} + for i = 1, #self.data.images do + table.insert(img_idx,self.data.images[i].id) + img_idx_map[self.data.images[i].id] = i + end + print((' Mapped images in %.4fs'):format(localtimer:time().real)) + localtimer:reset() + + -- mapping annotations + local ann = self.data.annotations + local o = {} + + for k, v in ipairs(ann) do + table.insert(o,v.image_id*1e10 + v.category_id) + end + o = torch.LongTensor(o) + local _,ox = o:sort() + local o_data = ox:data() + local temp_ann = {} + for i=1 , o:size(1) do + table.insert(temp_ann, ann[ox[i] ]) + end + self.data.annotations = temp_ann + + local ann_idx = {} + local ann_idx_map = {} + local ann_img_idx = {} + local img_ann_idx_map = {} + for k,v in ipairs(temp_ann) do + table.insert(ann_idx, v.id) + ann_idx_map[v.id] = k + table.insert(ann_img_idx, v.image_id) + if not img_ann_idx_map[v.image_id] then + img_ann_idx_map[v.image_id] = {} + end + table.insert(img_ann_idx_map[v.image_id],v.id) + end + + self.inds = {img_idx = img_idx, + img_idx_map = img_idx_map, + ann_idx = ann_idx, + ann_idx_map = ann_idx_map, + ann_img_idx = ann_img_idx, + img_ann_idx_map = img_ann_idx_map + } + print((' Mapped annotations in %.4fs'):format(localtimer:time().real)) + localtimer:reset() + + -- mapping classes + self.classes = {} + self.class_to_id = {} + self.class_cont = {} + self.class_cont_map = {} + self.num_classes = 0 + for k,v in ipairs(self.data.categories) do + self.classes[v.id] = v.name + self.class_to_id[v.name] = v.id + table.insert(self.class_cont,v.id) + self.class_cont_map[v.id] = k + self.num_classes = self.num_classes + 1 + end + + print((' Total elapsed time: %.4fs'):format(timer:time().real)) + +end + +function DataSetCOCO:getImage(i) + local file_name = self.images[i].file_name + return image.load(paths.concat(self.imgpath,file_name),3,'float') +end + +function DataSetCOCO:getAnnotation(i) + local ann = {object = {}} + local im_id = self.inds.img_idx[i] + local ann_id = self.inds.img_ann_idx_map[im_id] or {} + for k,v in ipairs(ann_id) do + local lann = self.data.annotations[self.inds.ann_idx_map[v] ] + local bbox = {xmin=lann.bbox[1]+1,ymin=lann.bbox[2]+1, + xmax=lann.bbox[1]+lann.bbox[3]+1, + ymax=lann.bbox[2]+lann.bbox[4]+1, + } + local obj = {bndbox=bbox, + class=lann.category_id, + difficult = '0', + name = self.classes[lann.category_id] + } + table.insert(ann.object,obj) + end + return ann +end + +function DataSetCOCO:getGTBoxes(i) + local anno = self:getAnnotation(i) + local valid_objects = {} + local gt_boxes = torch.IntTensor() + local gt_classes = {} + + if self.with_hard_samples then -- inversed with respect to RCNN code + for idx,obj in ipairs(anno.object) do + if self.class_to_id[obj.name] then -- to allow a subset of the classes + table.insert(valid_objects,idx) + end + end + else + for idx,obj in ipairs(anno.object) do + if obj.difficult == '0' and self.class_to_id[obj.name] then + table.insert(valid_objects,idx) + end + end + end + + 
gt_boxes:resize(#valid_objects,4) + for idx0,idx in ipairs(valid_objects) do + gt_boxes[idx0][1] = anno.object[idx].bndbox.xmin + gt_boxes[idx0][2] = anno.object[idx].bndbox.ymin + gt_boxes[idx0][3] = anno.object[idx].bndbox.xmax + gt_boxes[idx0][4] = anno.object[idx].bndbox.ymax + + table.insert(gt_classes,self.class_cont_map[anno.object[idx].class]) + end + + return gt_boxes,gt_classes,valid_objects,anno + +end + + diff --git a/DataSetDetection.lua b/DataSetDetection.lua new file mode 100644 index 0000000..1933a1b --- /dev/null +++ b/DataSetDetection.lua @@ -0,0 +1,112 @@ +local utilities = paths.dofile('utils.lua') +local concat = utilities.concat +local boxoverlap = utilities.boxoverlap + +local DataSetDetection = torch.class('nnf.DataSetDetection') + +function DataSetDetection:__init() + self.classes = nil + self.num_classes = nil + self.image_set = nil + self.dataset_name = nil +end + +function DataSetDetection:getImage(i) +end + +function DataSetDetection:getAnnotation(i) +end + +function DataSetDetection:getROIBoxes(i) +end + +function DataSetDetection:getGTBoxes(i) +end + +function DataSetDetection:size() + return #self.img_ids +end + +function DataSetDetection:__tostring__() + local str = torch.type(self) + str = str .. '\n Dataset Name: ' .. self.dataset_name + str = str .. '\n ImageSet: '.. self.image_set + str = str .. '\n Number of images: '.. self:size() + str = str .. '\n Classes:' + for k,v in ipairs(self.classes) do + str = str .. '\n '..v + end + return str +end + +function DataSetDetection:bestOverlap(all_boxes, gt_boxes, gt_classes) + local num_total_boxes = all_boxes:size(1) + local num_gt_boxes = gt_boxes:dim() > 0 and gt_boxes:size(1) or 0 + local overlap_class = torch.FloatTensor(num_total_boxes,self.num_classes):zero() + local overlap = torch.FloatTensor(num_total_boxes,num_gt_boxes):zero() + for idx=1,num_gt_boxes do + local o = boxoverlap(all_boxes,gt_boxes[idx]) + local tmp = overlap_class[{{},gt_classes[idx]}] -- pointer copy + tmp[tmp:lt(o)] = o[tmp:lt(o)] + overlap[{{},idx}] = o + end + -- get max class overlap + --rec.overlap,rec.label = rec.overlap:max(2) + --rec.overlap = torch.squeeze(rec.overlap,2) + --rec.label = torch.squeeze(rec.label,2) + --rec.label[rec.overlap:eq(0)] = 0 + local correspondance + if num_gt_boxes > 0 then + overlap,correspondance = overlap:max(2) + overlap = torch.squeeze(overlap,2) + correspondance = torch.squeeze(correspondance,2) + correspondance[overlap:eq(0)] = 0 + else + overlap = torch.FloatTensor(num_total_boxes):zero() + correspondance = torch.LongTensor(num_total_boxes):zero() + end + return overlap, correspondance, overlap_class +end + +function DataSetDetection:attachProposals(i) + + local boxes = self:getROIBoxes(i) + local gt_boxes,gt_classes,valid_objects,anno = self:getGTBoxes(i) + + local all_boxes = concat(gt_boxes,boxes,1) + + local num_boxes = boxes:dim() > 0 and boxes:size(1) or 0 + local num_gt_boxes = #gt_classes + + local rec = {} + rec.gt = concat(torch.ByteTensor(num_gt_boxes):fill(1), + torch.ByteTensor(num_boxes):fill(0) ) + + rec.overlap, rec.correspondance, rec.overlap_class = + self:bestOverlap(all_boxes,gt_boxes,gt_classes) + rec.label = torch.IntTensor(num_boxes+num_gt_boxes):fill(0) + for idx=1,(num_boxes+num_gt_boxes) do + local corr = rec.correspondance[idx] + if corr > 0 then + rec.label[idx] = gt_classes[corr] + end + end + + rec.boxes = all_boxes + rec.class = concat(torch.CharTensor(gt_classes), + torch.CharTensor(num_boxes):fill(0)) + + if self.save_objs then + rec.objects = {} + for _,idx in 
pairs(valid_objects) do + table.insert(rec.objects,anno.object[idx]) + end + end + + function rec:size() + return (num_boxes+num_gt_boxes) + end + + return rec +end + diff --git a/DataSetPascal.lua b/DataSetPascal.lua index a37b0b0..9e403df 100644 --- a/DataSetPascal.lua +++ b/DataSetPascal.lua @@ -1,11 +1,13 @@ local matio = require 'matio' local argcheck = dofile'argcheck.lua'--require 'argcheck' local xml = require 'xml' -local concat = paths.dofile('utils.lua').concat +local utilities = paths.dofile('utils.lua') +local concat = utilities.concat +local boxoverlap = utilities.boxoverlap matio.use_lua_strings = true -local DataSetPascal = torch.class('nnf.DataSetPascal') +local DataSetPascal,parent = torch.class('nnf.DataSetPascal', 'nnf.DataSetDetection') local function lines_from(file) -- get all lines from a file, returns an empty @@ -104,7 +106,7 @@ local initcheck = argcheck{ } function DataSetPascal:__init(...) - + parent.__init(self) local args = initcheck(...) print(args) for k,v in pairs(args) do self[k] = v end @@ -249,34 +251,6 @@ function DataSetPascal:getROIBoxes(i) return self.roidb[i]--self.roidb[self.img2roidb[self.img_ids[i] ] ] end -local function boxoverlap(a,b) - local b = b.xmin and {b.xmin,b.ymin,b.xmax,b.ymax} or b - - local x1 = a:select(2,1):clone() - x1[x1:lt(b[1])] = b[1] - local y1 = a:select(2,2):clone() - y1[y1:lt(b[2])] = b[2] - local x2 = a:select(2,3):clone() - x2[x2:gt(b[3])] = b[3] - local y2 = a:select(2,4):clone() - y2[y2:gt(b[4])] = b[4] - - local w = x2-x1+1; - local h = y2-y1+1; - local inter = torch.cmul(w,h):float() - local aarea = torch.cmul((a:select(2,3)-a:select(2,1)+1) , - (a:select(2,4)-a:select(2,2)+1)):float() - local barea = (b[3]-b[1]+1) * (b[4]-b[2]+1); - - -- intersection over union overlap - local o = torch.cdiv(inter , (aarea+barea-inter)) - -- set invalid entries to 0 overlap - o[w:lt(0)] = 0 - o[h:lt(0)] = 0 - - return o -end - function DataSetPascal:getGTBoxes(i) local anno = self:getAnnotation(i) local valid_objects = {} @@ -311,81 +285,6 @@ function DataSetPascal:getGTBoxes(i) end -function DataSetPascal:bestOverlap(all_boxes, gt_boxes, gt_classes) - local num_total_boxes = all_boxes:size(1) - local num_gt_boxes = gt_boxes:dim() > 0 and gt_boxes:size(1) or 0 - local overlap_class = torch.FloatTensor(num_total_boxes,self.num_classes):zero() - local overlap = torch.FloatTensor(num_total_boxes,num_gt_boxes):zero() - for idx=1,num_gt_boxes do - local o = boxoverlap(all_boxes,gt_boxes[idx]) - local tmp = overlap_class[{{},gt_classes[idx]}] -- pointer copy - tmp[tmp:lt(o)] = o[tmp:lt(o)] - overlap[{{},idx}] = o - end - -- get max class overlap - --rec.overlap,rec.label = rec.overlap:max(2) - --rec.overlap = torch.squeeze(rec.overlap,2) - --rec.label = torch.squeeze(rec.label,2) - --rec.label[rec.overlap:eq(0)] = 0 - local correspondance - if num_gt_boxes > 0 then - overlap,correspondance = overlap:max(2) - overlap = torch.squeeze(overlap,2) - correspondance = torch.squeeze(correspondance,2) - correspondance[overlap:eq(0)] = 0 - else - overlap = torch.FloatTensor(num_total_boxes):zero() - correspondance = torch.LongTensor(num_total_boxes):zero() - end - return overlap, correspondance, overlap_class -end - -function DataSetPascal:attachProposals(i) - - if not self.roidb then - self:loadROIDB() - end - - local boxes = self:getROIBoxes(i) - local gt_boxes,gt_classes,valid_objects,anno = self:getGTBoxes(i) - - local all_boxes = concat(gt_boxes,boxes,1) - - local num_boxes = boxes:dim() > 0 and boxes:size(1) or 0 - local num_gt_boxes = 
#gt_classes - - local rec = {} - rec.gt = concat(torch.ByteTensor(num_gt_boxes):fill(1), - torch.ByteTensor(num_boxes):fill(0) ) - - rec.overlap, rec.correspondance, rec.overlap_class = - self:bestOverlap(all_boxes,gt_boxes,gt_classes) - rec.label = torch.IntTensor(num_boxes+num_gt_boxes):fill(0) - for idx=1,(num_boxes+num_gt_boxes) do - local corr = rec.correspondance[idx] - if corr > 0 then - rec.label[idx] = gt_classes[corr] - end - end - - rec.boxes = all_boxes - rec.class = concat(torch.CharTensor(gt_classes), - torch.CharTensor(num_boxes):fill(0)) - - if self.save_objs then - rec.objects = {} - for _,idx in pairs(valid_objects) do - table.insert(rec.objects,anno.object[idx]) - end - end - - function rec:size() - return (num_boxes+num_gt_boxes) - end - - return rec -end - function DataSetPascal:createROIs() if self.rois then return diff --git a/nnf.lua b/nnf.lua index c41ebfc..e0551b9 100644 --- a/nnf.lua +++ b/nnf.lua @@ -7,7 +7,10 @@ nnf = {} torch.include('nnf','ImageTransformer.lua') +torch.include('nnf','DataSetDetection.lua') torch.include('nnf','DataSetPascal.lua') +torch.include('nnf','DataSetCOCO.lua') + torch.include('nnf','BatchProviderBase.lua') torch.include('nnf','BatchProvider.lua') torch.include('nnf','BatchProviderROI.lua') diff --git a/utils.lua b/utils.lua index 785d2a3..20bdf1c 100644 --- a/utils.lua +++ b/utils.lua @@ -124,7 +124,6 @@ end -------------------------------------------------------------------------------- local function boxoverlap(a,b) - --local b = anno.objects[j] local b = b.xmin and {b.xmin,b.ymin,b.xmax,b.ymax} or b local x1 = a:select(2,1):clone() @@ -314,6 +313,7 @@ utils.sanitize = sanitize utils.recursiveResizeAsCopyTyped = recursiveResizeAsCopyTyped utils.flipBoundingBoxes = flipBoundingBoxes utils.concat = concat +utils.boxoverlap = boxoverlap return utils From c02a6b4e9904a67010644f4ee554e3f71f89259a Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Wed, 7 Oct 2015 11:17:54 +0200 Subject: [PATCH 48/79] Generalizing argument checks --- BatchProvider.lua | 20 ++++++++++++++++++-- BatchProviderROI.lua | 17 ++++++++++++++++- DataSetDetection.lua | 1 + FRCNN.lua | 1 + RCNN.lua | 1 + SPP.lua | 1 + Trainer.lua | 5 +++-- 7 files changed, 41 insertions(+), 5 deletions(-) diff --git a/BatchProvider.lua b/BatchProvider.lua index 6fb2631..eb20014 100644 --- a/BatchProvider.lua +++ b/BatchProvider.lua @@ -1,16 +1,32 @@ local BatchProvider,parent = torch.class('nnf.BatchProvider','nnf.BatchProviderBase') + local argcheck = require 'argcheck' + +local env = require 'argcheck.env' -- retrieve argcheck environement +-- this is the default type function +-- which can be overrided by the user +function env.istype(obj, typename) + if typename == 'DataSet' then + return obj._isDataSet + end + if typename == 'FeatureProvider' then + return obj._isFeatureProvider + end + return torch.type(obj) == typename +end + + local initcheck = argcheck{ pack=true, noordered=true, {name="dataset", - type="nnf.DataSetPascal", + type="DataSet", help="A dataset class" }, {name="feat_provider", - type="nnf.RCNN", + type="FeatureProvider", help="A feat provider class" }, {name="batch_size", diff --git a/BatchProviderROI.lua b/BatchProviderROI.lua index 0cbee39..5f1c3ff 100644 --- a/BatchProviderROI.lua +++ b/BatchProviderROI.lua @@ -1,11 +1,26 @@ local BatchProviderROI, parent = torch.class('nnf.BatchProviderROI','nnf.BatchProviderBase') local argcheck = require 'argcheck' + +local env = require 'argcheck.env' -- retrieve argcheck environement +-- this is the default type 
function +-- which can be overrided by the user +function env.istype(obj, typename) + if typename == 'DataSet' then + return obj._isDataSet + end + if typename == 'FeatureProvider' then + return obj._isFeatureProvider + end + return torch.type(obj) == typename +end + + local initcheck = argcheck{ pack=true, noordered=true, {name="dataset", - type="nnf.DataSetPascal", + type="DataSet", help="A dataset class" }, {name="feat_provider", diff --git a/DataSetDetection.lua b/DataSetDetection.lua index 1933a1b..a557ece 100644 --- a/DataSetDetection.lua +++ b/DataSetDetection.lua @@ -3,6 +3,7 @@ local concat = utilities.concat local boxoverlap = utilities.boxoverlap local DataSetDetection = torch.class('nnf.DataSetDetection') +DataSetDetection._isDataSet = true function DataSetDetection:__init() self.classes = nil diff --git a/FRCNN.lua b/FRCNN.lua index d6292f5..064c8c3 100644 --- a/FRCNN.lua +++ b/FRCNN.lua @@ -1,6 +1,7 @@ local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes local recursiveResizeAsCopyTyped = paths.dofile('utils.lua').recursiveResizeAsCopyTyped local FRCNN = torch.class('nnf.FRCNN') +FRCNN._isFeatureProvider = true local argcheck = require 'argcheck' local initcheck = argcheck{ diff --git a/RCNN.lua b/RCNN.lua index 346f4db..5af8200 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -33,6 +33,7 @@ local initcheck = argcheck{ local RCNN = torch.class('nnf.RCNN') +RCNN._isFeatureProvider = true function RCNN:__init(...) -- self.image_transformer = nnf.ImageTransformer{ diff --git a/SPP.lua b/SPP.lua index 421dcf1..a8e9017 100644 --- a/SPP.lua +++ b/SPP.lua @@ -2,6 +2,7 @@ local hdf5 = require 'hdf5' local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes local SPP = torch.class('nnf.SPP') +SPP._isFeatureProvider = true -- argcheck crashes with that many arguments, and using unordered -- doesn't seems practical diff --git a/Trainer.lua b/Trainer.lua index 1828584..8ac9c47 100644 --- a/Trainer.lua +++ b/Trainer.lua @@ -6,7 +6,7 @@ local recursiveResizeAsCopyTyped = utils.recursiveResizeAsCopyTyped local Trainer = torch.class('nnf.Trainer') -function Trainer:__init(module,criterion,batch_provider) +function Trainer:__init(module,criterion,batch_provider,optimState) self.module = module self.criterion = criterion @@ -14,7 +14,8 @@ function Trainer:__init(module,criterion,batch_provider) self.parameters,self.gradParameters = self.module:getParameters() - self.optimState = {learningRate = 1e-3, weightDecay = 0.0005, momentum = 0.9, + self.optimState = optimState or + {learningRate = 1e-3, weightDecay = 0.0005, momentum = 0.9, learningRateDecay = 0, dampening = 0} self.epoch = 0 From 376a3c42ce229b65ef28c77164260e392aa5e463 Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Thu, 8 Oct 2015 18:59:39 +0200 Subject: [PATCH 49/79] Improve visualization tool --- tests/test_full_frcnn.lua | 23 ++++++++++++--------- tests/test_full_rcnn.lua | 2 +- tests/test_visualization2.lua | 38 +++++++++++++++++++++++++++++++++++ visualize_detections.lua | 20 ++++++++++++++---- 4 files changed, 69 insertions(+), 14 deletions(-) create mode 100644 tests/test_visualization2.lua diff --git a/tests/test_full_frcnn.lua b/tests/test_full_frcnn.lua index b13f749..c49c2c6 100644 --- a/tests/test_full_frcnn.lua +++ b/tests/test_full_frcnn.lua @@ -22,7 +22,7 @@ fp:training() bp = nnf.BatchProviderROI{dataset=ds,feat_provider=fp, bg_threshold={0.1,0.5} } ---bp:setupData() +bp:setupData() -------------------------------------------------------------------------------- -- define model @@ -76,10 +76,11 
@@ do model:add(classifier) end model:cuda() -model = nil -collectgarbage() -model = torch.load('test_model.t7') -model:cuda() + +--model = nil +--collectgarbage() +--model = torch.load('test_model.t7') +--model:cuda() collectgarbage() -------------------------------------------------------------------------------- -- train @@ -89,12 +90,14 @@ criterion = nn.CrossEntropyCriterion():cuda() trainer = nnf.Trainer(model,criterion,bp) -for i=1,0 do +savedModel = model:clone('weight','bias','running_mean','running_std') +for i=1,400 do if i == 300 then trainer.optimState.learningRate = trainer.optimState.learningRate/10 end - xlua.progress(i,400) + print(('Iteration %3d/%-3d'):format(i,400)) trainer:train(100) + print((' Train error: %g'):format(trainer.fx[i])) end -------------------------------------------------------------------------------- @@ -112,8 +115,10 @@ dsv = nnf.DataSetPascal{image_set='test', fpv = nnf.FRCNN{image_transformer=image_transformer} fpv:evaluate() -exp_name = 'test1_frcnn' +exp_name = 'test2_frcnn' -tester = nnf.Tester_FRCNN(model,fpv,dsv) +tester = nnf.Tester(model,fpv,dsv) tester.cachefolder = 'cachedir/'..exp_name tester:test(40000) + +torch.save(paths.concat(tester.cachefolder,'model.t7'),savedModel) diff --git a/tests/test_full_rcnn.lua b/tests/test_full_rcnn.lua index ea3d8a6..6abd6ab 100644 --- a/tests/test_full_rcnn.lua +++ b/tests/test_full_rcnn.lua @@ -115,6 +115,6 @@ fpv = nnf.RCNN{image_transformer=image_transformer, fpv:evaluate() exp_name = 'test1_rcnn' -tester = nnf.Tester_FRCNN(model,fpv,dsv) +tester = nnf.Tester(model,fpv,dsv) tester.cachefolder = 'cachedir/'..exp_name tester:test(40000) diff --git a/tests/test_visualization2.lua b/tests/test_visualization2.lua new file mode 100644 index 0000000..b289e82 --- /dev/null +++ b/tests/test_visualization2.lua @@ -0,0 +1,38 @@ +require 'cutorch' +require 'nnf' +require 'cudnn' +require 'inn' +dofile 'visualize_detections.lua' + +cutorch.setDevice(2) + +--model = torch.load('cachedir/test2_frcnn/model.t7') +model = torch.load('cachedir/model.t7') +--model:add(nn.SoftMax():cuda()) + +image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, + raw_scale = 255, + swap = {3,2,1}} + + +ds = nnf.DataSetPascal{image_set='test', + datadir='datasets/VOCdevkit', + roidbdir='data/selective_search_data' + } + +fp = nnf.FRCNN{image_transformer=image_transformer} +fp:evaluate() +model:evaluate() +detect = nnf.ImageDetect(model,fp) + +im_idx = 700 + +I = ds:getImage(im_idx) +boxes = ds:getROIBoxes(im_idx) +--boxes = ds:getGTBoxes(im_idx) + +scores,bb = detect:detect(I,boxes) + +visualize_detections(I,boxes,scores,0.5,ds.classes) + + diff --git a/visualize_detections.lua b/visualize_detections.lua index 5d14bdc..19c5539 100644 --- a/visualize_detections.lua +++ b/visualize_detections.lua @@ -1,4 +1,6 @@ -function visualize_detections(im,boxes,scores,thresh) +local nms = dofile 'nms.lua' + +function visualize_detections(im,boxes,scores,thresh,cl_names) local ok = pcall(require,'qt') if not ok then error('You need to run visualize_detections using qlua') @@ -16,16 +18,22 @@ function visualize_detections(im,boxes,scores,thresh) local r = torch.range(1,boxes:size(1)):long() local rr = r[idx_thresh] local boxes_thresh = boxes:index(1,rr) + + local keep = nms(torch.cat(boxes_thresh:float(),max_score,2),0.3) + + boxes_thresh = boxes_thresh:index(1,keep) + max_score = max_score:index(1,keep) + idx = idx:index(1,keep) local num_boxes = boxes_thresh:size(1) local widths = boxes_thresh[{{},3}] - boxes_thresh[{{},1}] 
local heights = boxes_thresh[{{},4}] - boxes_thresh[{{},2}] local x,y = im:size(3),im:size(2) - local w = qtwidget.newwindow(x,y,"test") + local w = qtwidget.newwindow(x,y,"Detections") local qtimg = qt.QImage.fromTensor(im) w:image(0,0,x,y,qtimg) - local fontsize = 10 + local fontsize = 15 for i=1,num_boxes do local x,y = boxes_thresh[{i,1}],boxes_thresh[{i,2}] @@ -38,7 +46,11 @@ -- add score w:moveto(x,y+fontsize) w:setcolor("red") w:setfont(qt.QFont{serif=true,italic=true,size=fontsize,bold=true}) - w:show(string.format('%d: %.2f',idx[i],max_score[i])) + if cl_names then + w:show(string.format('%s: %.2f',cl_names[idx[i]],max_score[i])) + else + w:show(string.format('%d: %.2f',idx[i],max_score[i])) + end end w:setcolor("red") w:setlinewidth(2) From a7f3b99e14d30fb962dbd592f7de06748280c290 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Wed, 14 Oct 2015 20:14:22 +0200 Subject: [PATCH 50/79] Started updating README --- README.md | 88 ++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 74 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index b1525db..2ea104d 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,80 @@ ## Object detection in torch -Implementation of some object detection frameworks in [torch](http://torch.ch). +This library aims to provide a simple architecture to easily perform object detection in [torch](http://torch.ch). +It currently contains code for training the following frameworks: [RCNN](link), [SPP](link) and [Fast-RCNN](link). + +It consists of 6 basic classes: + +* ImageTransformer: Preprocess an image before feeding it to the network +* DataSetDetection: Generic dataset class for object detection. + * DataSetPascal + * DataSetCOCO +* FeatureProvider: Implements the necessary operations on images and bounding boxes + * RCNN + * SPP + * Fast-RCNN +* BatchProvider: Samples random patches + * BatchProviderRC + * BatchProviderIC +* ImageDetect: Encapsulates a model and a feature provider to perform the detection +* Tester: Evaluate the detection using the Pascal VOC approach. + +## Feature Provider +The `FeatureProvider` class defines the way different algorithms process an image and a set of bounding boxes to feed it to the CNN. +It implements a `getFeature(image,boxes)` function, which computes the necessary transformations in the input data, and a `postProcess()` function, which takes the output of the network plus the original inputs and post-processes them. This post-processing could be a bounding box regression step, for example. + +## Batch Provider +This class implements sampling strategies for training Object Detectors. +In its constructor, it takes as arguments a `DataSetDetection` and a `FeatureProvider`. +It implements a `getBatch` function, which samples from the `DataSet` using `FeatureProvider`. + +### BatchProviderRC +ROI-Centric Batch Provider: it samples the patches randomly over the whole pool of patches. + +### BatchProviderIC +Image-Centric Batch Provider: it first samples a set of images, and then samples a set of patches from those images. + +## Examples +Here we show a simple example demonstrating how to perform object detection given an image and a set of bounding boxes. 
+ +```lua +require 'nnf' +require 'image' +require 'nn' + +model = torch.load('model.t7') +I = image.lena() +bboxes = {1,1,200,200} + +image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, + raw_scale = 255, + swap = {3,2,1}} +feat_provider = nnf.RCNN{crop_size=227,image_transformer=image_transformer} + +-- the following could also be done by creating an instance of ImageDetect +-- and calling :detect(I,boxes) +feats = feat_provider:getFeature(I,bboxes) +scores = feat_provider:compute(model,feats) + +-- visualization +threshold = 0.5 +visualize_detections(I,bboxes,scores,threshold) + +``` + +More examples can be found at [examples](http://github.com/fmassa/object-detection.torch/examples/) + +#### Bounding box proposals +Note that this repo doesn't contain code for generating bounding box proposals. For the moment, they are pre-computed and loaded at run time. ### Dependencies It requires the following packages - [xml](http://doc.lubyk.org/xml.html) - [matio-ffi.torch](https://github.com/soumith/matio-ffi.torch) - [hdf5](https://github.com/deepmind/torch-hdf5) - [inn](https://github.com/szagoruyko/imagine-nn) + - [xml](http://doc.lubyk.org/xml.html) (For `DataSetPascal`) + - [matio-ffi.torch](https://github.com/soumith/matio-ffi.torch) (For `DataSetPascal`) + - [hdf5](https://github.com/deepmind/torch-hdf5) (for `SPP`) + - [inn](https://github.com/szagoruyko/imagine-nn) (for `SPP`) To install them all, do @@ -28,6 +93,10 @@ luarocks install matio To install `hdf5`, follow the instructions in [here](https://github.com/deepmind/torch-hdf5/blob/master/doc/usage.md) +## Old code +The old version of this repo can be found [here](link). + + ### Running this code First, clone this repo @@ -46,15 +115,6 @@ model:add(classifier) ``` where `features` can be a `nn.Sequential` of several convolutions and `pooling_layer` is the last pooling with reshaping of the data to feed it to the classifier. See `models/zeiler.lua` for an example. -To finetune the network for detection, simply run -``` -th main.lua -``` - -To get an overview of the different parameters, do -``` -th main.lua -h -``` The default is to consider that the dataset is present in `datasets/VOCdevkit/VOC2007/`. The default location of bounding boxes `.mat` files (in RCNN format) is supposed to be in `data/selective_search_data/`. From 9f6abf1b3982322777d42cba948393abc94b43b2 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 18 Oct 2015 21:20:38 +0200 Subject: [PATCH 51/79] Improve README --- README.md | 50 +++++++++++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 2ea104d..b268716 100644 --- a/README.md +++ b/README.md @@ -1,40 +1,56 @@ ## Object detection in torch This library aims to provide a simple architecture to easily perform object detection in [torch](http://torch.ch). -It currently contains code for training the following frameworks: [RCNN](link), [SPP](link) and [Fast-RCNN](link). +It currently contains code for training the following frameworks: [RCNN](http://arxiv.org/abs/1311.2524), [SPP](http://arxiv.org/abs/1406.4729) and [Fast-RCNN](http://arxiv.org/abs/1504.08083). It consists of 6 basic classes: * ImageTransformer: Preprocess an image before feeding it to the network * DataSetDetection: Generic dataset class for object detection. 
* DataSetPascal - * DataSetCOCO + * DataSetCOCO (not finished) -* FeatureProvider: Implements the necessary operations on images and bounding boxes - * RCNN - * SPP - * Fast-RCNN -* BatchProvider: Samples random patches - * BatchProviderRC - * BatchProviderIC +* [FeatureProvider](#feat_provider): Implements the necessary operations on images and bounding boxes + * [RCNN](#rcnn) + * [SPP](#spp) + * [Fast-RCNN](#frcnn) +* [BatchProvider](#batch_provider): Samples random patches + * [BatchProviderRC](#batch_provider_rc): ROI-Centric + * [BatchProviderIC](#batch_provider_ic): Image-Centric * ImageDetect: Encapsulates a model and a feature provider to perform the detection * Tester: Evaluate the detection using the Pascal VOC approach. + +### Feature Provider The `FeatureProvider` class defines the way different algorithms process an image and a set of bounding boxes to feed it to the CNN. It implements a `getFeature(image,boxes)` function, which computes the necessary transformations in the input data, and a `postProcess()` function, which takes the output of the network plus the original inputs and post-processes them. + +#### RCNN This is the first work that used CNNs for object detection using bounding box proposals. The transformation is the simplest one. It crops the image at the specified positions given by the bounding boxes, and rescales them to be square. + +#### SPP Contrary to RCNN, SPP crops the images in the feature space (here, `conv5`). It allows the convolutional features to be computed once for the entire image, making it much more efficient. + +#### Fast-RCNN Similar to SPP, Fast-RCNN also crops the images in the feature space, but instead of keeping the convolutional layers fixed, it trains them together with the fully-connected layers. + + + +### Batch Provider This class implements sampling strategies for training Object Detectors. In its constructor, it takes as arguments a `DataSetDetection` and a `FeatureProvider`. It implements a `getBatch` function, which samples from the `DataSet` using `FeatureProvider`. + +#### BatchProviderRC ROI-Centric Batch Provider: it samples the patches randomly over the whole pool of patches. + +#### BatchProviderIC Image-Centric Batch Provider: it first samples a set of images, and then samples a set of patches from those images. -## Examples +### Examples Here we show a simple example demonstrating how to perform object detection given an image and a set of bounding boxes. ```lua require 'nnf' require 'image' require 'nn' model = torch.load('model.t7') I = image.lena() bboxes = {1,1,200,200} image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, raw_scale = 255, swap = {3,2,1}} feat_provider = nnf.RCNN{crop_size=227,image_transformer=image_transformer} -- the following could also be done by creating an instance of ImageDetect -- and calling :detect(I,boxes) feats = feat_provider:getFeature(I,bboxes) scores = feat_provider:compute(model,feats) -- visualization threshold = 0.5 visualize_detections(I,bboxes,scores,threshold) ``` -More examples can be found at [examples](http://github.com/fmassa/object-detection.torch/examples/) +For an illustration of how to use this code to train a detector, or to evaluate it on Pascal, see the [examples](http://github.com/fmassa/object-detection.torch/tree/master/examples). #### Bounding box proposals Note that this repo doesn't contain code for generating bounding box proposals. For the moment, they are pre-computed and loaded at run time. @@ -93,6 +109,8 @@ luarocks install matio To install `hdf5`, follow the instructions in [here](https://github.com/deepmind/torch-hdf5/blob/master/doc/usage.md) -## Old code -The old version of this repo can be found [here](link). +### Old code +The old version of this repo can be found [here](https://github.com/fmassa/object-detection.torch/tree/legacy). 
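To make the interface above concrete, the sketch below spells out the manual path that the comment in the README example says `ImageDetect` wraps: the feature provider turns an image plus boxes into network inputs with `getFeature`, and `compute` forwards them through the model. This is only a sketch under stated assumptions — a trained network saved as `model.t7` is assumed to exist, and is not provided by these patches:

```lua
-- Sketch: the manual equivalent of nnf.ImageDetect(model, feat_provider):detect(I, bboxes),
-- per the "could also be done" comment in the README example above.
-- Assumes model.t7 holds a trained network (not part of this patch series).
require 'nnf'
require 'image'
require 'nn'

local model  = torch.load('model.t7')
local I      = image.lena()
local bboxes = torch.Tensor{{1,1,200,200}} -- one box, (xmin,ymin,xmax,ymax)

local image_transformer = nnf.ImageTransformer{
  mean_pix  = {102.9801,115.9465,122.7717},
  raw_scale = 255,
  swap      = {3,2,1}
}
local feat_provider = nnf.RCNN{crop_size=227, image_transformer=image_transformer}
feat_provider:evaluate() -- providers behave differently at train and test time

local feats  = feat_provider:getFeature(I, bboxes) -- crop and warp each box
local scores = feat_provider:compute(model, feats) -- forward crops through the net
```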
### Running this code From 771df8f9400dcf1c39e15b278566da7e8a6d98d9 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 18 Oct 2015 22:11:29 +0200 Subject: [PATCH 52/79] Add example on how to train and test RCNN --- examples/train_test_rcnn.lua | 109 +++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 examples/train_test_rcnn.lua diff --git a/examples/train_test_rcnn.lua b/examples/train_test_rcnn.lua new file mode 100644 index 0000000..6916ac0 --- /dev/null +++ b/examples/train_test_rcnn.lua @@ -0,0 +1,109 @@ +require 'nnf' +--require 'loadcaffe' + +cmd = torch.CmdLine() +cmd:text('Example on how to train/test a RCNN based object detector on Pascal') +cmd:option('-lr',1e-3,'learning rate') +cmd:option('-num_iter',40000,'number of iterations') +cmd:option('-disp_iter',100,'display every n iterations') +cmd:option('-lr_step',30000,'step for reducing the learning rate') +cmd:option('-gpu',1,'gpu to use (0 for cpu mode)') +cmd:option('-name','rcnn-example','base name') + +opt = cmd:parse(arg or {}) + +exp_name = cmd:string(opt.name, opt, {name=true, gpu=true}) + +local tensor_type +if opt.gpu > 0 then + require 'cunn' + cutorch.setDevice(opt.gpu) + tensor_type = 'torch.CudaTensor' +else + require 'nn' + tensor_type = 'torch.FloatTensor' +end + +-- this class holds all the necessary informationn regarding the dataset +ds = nnf.DataSetPascal{image_set='trainval', + datadir='datasets/VOCdevkit', + roidbdir='data/selective_search_data' + } +-- define the transformations to do in the image before +-- passing it to the network +local image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, + raw_scale = 255, + swap = {3,2,1}} +-------------------------------------------------------------------------------- +-- define feature providers +-------------------------------------------------------------------------------- + +local crop_size = 224 + +-- the feature provider extract the features for a given image + bounding box +fp = nnf.RCNN{image_transformer=image_transformer, + crop_size=crop_size} +-- different frameworks can behave differently during training and testing +fp:training() +-------------------------------------------------------------------------------- +-- define batch providers +-------------------------------------------------------------------------------- + +bp = nnf.BatchProvider{dataset=ds,feat_provider=fp, + bg_threshold={0.0,0.5}, + nTimesMoreData=2, + iter_per_batch=10,--100, + } +bp:setupData() + +-------------------------------------------------------------------------------- +-- define model and criterion +-------------------------------------------------------------------------------- +paths.dofile('../models/rcnn.lua') +model = createModel() + +criterion = nn.CrossEntropyCriterion() + +model:type(tensor_type) +criterion:type(tensor_type) +-------------------------------------------------------------------------------- +-- train +-------------------------------------------------------------------------------- + +trainer = nnf.Trainer(model, criterion, bp) + +local num_iter = opt.num_iter/opt.disp_iter +local step_iter = opt.lr_step/opt.disp_iter + +for i=1,num_iter do + if i % lr_step == 0 then + trainer.optimState.learningRate = trainer.optimState.learningRate/10 + end + print(('Iteration %3d/%-3d'):format(i,num_iter)) + trainer:train(opt.disp_iter) +end + +-------------------------------------------------------------------------------- +-- evaluate 
+-------------------------------------------------------------------------------- + +-- add softmax to classfier, because we were using nn.CrossEntropyCriterion +local softmax = nn.SoftMax() +softmax:type(tensor_type) +model:add(softmax) + +-- dataset for evaluation +dsv = nnf.DataSetPascal{image_set='test', + datadir='datasets/VOCdevkit', + roidbdir='data/selective_search_data' + } + + +fpv = nnf.RCNN{image_transformer=image_transformer, + crop_size=crop_size} +fpv:evaluate() + +-- define the class to test the model on the full dataset +tester = nnf.Tester(model,fpv,dsv) +tester.cachefolder = '../cachedir/'..exp_name +tester:test(opt.num_iter) From af3a622aceb108cc8a0914fcfac8f2f3b6ba95fd Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 18 Oct 2015 22:19:07 +0200 Subject: [PATCH 53/79] Renaming files --- BatchProviderROI.lua => BatchProviderIC.lua | 0 BatchProvider.lua => BatchProviderRC.lua | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename BatchProviderROI.lua => BatchProviderIC.lua (100%) rename BatchProvider.lua => BatchProviderRC.lua (100%) diff --git a/BatchProviderROI.lua b/BatchProviderIC.lua similarity index 100% rename from BatchProviderROI.lua rename to BatchProviderIC.lua diff --git a/BatchProvider.lua b/BatchProviderRC.lua similarity index 100% rename from BatchProvider.lua rename to BatchProviderRC.lua From 5b64965a2a3128dc51f91d6dba886d551ebbfeae Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 18 Oct 2015 22:20:12 +0200 Subject: [PATCH 54/79] Finish renaming files --- BatchProviderIC.lua | 2 +- BatchProviderRC.lua | 2 +- nnf.lua | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/BatchProviderIC.lua b/BatchProviderIC.lua index 5f1c3ff..169d80d 100644 --- a/BatchProviderIC.lua +++ b/BatchProviderIC.lua @@ -1,4 +1,4 @@ -local BatchProviderROI, parent = torch.class('nnf.BatchProviderROI','nnf.BatchProviderBase') +local BatchProviderROI, parent = torch.class('nnf.BatchProviderIC','nnf.BatchProviderBase') local argcheck = require 'argcheck' diff --git a/BatchProviderRC.lua b/BatchProviderRC.lua index eb20014..a090abd 100644 --- a/BatchProviderRC.lua +++ b/BatchProviderRC.lua @@ -1,5 +1,5 @@ local BatchProvider,parent = - torch.class('nnf.BatchProvider','nnf.BatchProviderBase') + torch.class('nnf.BatchProviderRC','nnf.BatchProviderBase') local argcheck = require 'argcheck' diff --git a/nnf.lua b/nnf.lua index e0551b9..d9fd777 100644 --- a/nnf.lua +++ b/nnf.lua @@ -12,8 +12,8 @@ torch.include('nnf','DataSetPascal.lua') torch.include('nnf','DataSetCOCO.lua') torch.include('nnf','BatchProviderBase.lua') -torch.include('nnf','BatchProvider.lua') -torch.include('nnf','BatchProviderROI.lua') +torch.include('nnf','BatchProviderIC.lua') +torch.include('nnf','BatchProviderRC.lua') torch.include('nnf','SPP.lua') torch.include('nnf','RCNN.lua') From eb579f6e33b5e7a8d8769e5224e538620402d2c9 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 18 Oct 2015 22:54:26 +0200 Subject: [PATCH 55/79] Update README and examples --- BatchProviderIC.lua | 10 ++-- README.md | 38 ++++++++++++++- SPP.lua | 2 +- examples/train_test_rcnn.lua | 90 +++++++++++++++++++++++------------- 4 files changed, 102 insertions(+), 38 deletions(-) diff --git a/BatchProviderIC.lua b/BatchProviderIC.lua index 169d80d..e79fb6f 100644 --- a/BatchProviderIC.lua +++ b/BatchProviderIC.lua @@ -1,4 +1,4 @@ -local BatchProviderROI, parent = torch.class('nnf.BatchProviderIC','nnf.BatchProviderBase') +local BatchProvider, parent = 
torch.class('nnf.BatchProviderIC','nnf.BatchProviderBase') local argcheck = require 'argcheck' @@ -57,7 +57,7 @@ local initcheck = argcheck{ }, } -function BatchProviderROI:__init(...) +function BatchProvider:__init(...) parent.__init(self) local opts = initcheck(...) @@ -66,7 +66,7 @@ end -- setup is the same -function BatchProviderROI:permuteIdx() +function BatchProvider:permuteIdx() local total_img = self.dataset:size() local imgs_per_batch = self.imgs_per_batch @@ -105,7 +105,7 @@ function BatchProviderROI:permuteIdx() end -function BatchProviderROI:selectBBoxes(fg_windows,bg_windows) +function BatchProvider:selectBBoxes(fg_windows,bg_windows) local fg_num_each = torch.round(self.fg_num_each/self.imgs_per_batch) local bg_num_each = torch.round(self.bg_num_each/self.imgs_per_batch) @@ -138,7 +138,7 @@ function BatchProviderROI:selectBBoxes(fg_windows,bg_windows) return bboxes, labels end -function BatchProviderROI:getBatch() +function BatchProvider:getBatch() local dataset = self.dataset self.fg_num_each = self.fg_fraction * self.batch_size diff --git a/README.md b/README.md index b268716..161b99f 100644 --- a/README.md +++ b/README.md @@ -23,32 +23,68 @@ It consists of 6 basic classes: ### Feature Provider The `FeatureProvider` class defines the way different algorithms process an image and a set of bounding boxes to feed it to the CNN. It implements a `getFeature(image,boxes)` function, which computes the necessary transformations in the input data, and a `postProcess()` function, which takes the output of the network plus the original inputs and post-processes them. +Every Feature Provider constructor takes as input an `ImageTransformer` and a `max_batch_size` (used for evaluation). #### RCNN This is the first work that used CNNs for object detection using bounding box proposals. The transformation is the simplest one. It crops the image at the specified positions given by the bounding boxes, and rescales them to be square. +The constructor has the following arguments: + * `crop_size` + * `padding` + * `use_square` + #### SPP Contrary to RCNN, SPP crops the images in the feature space (here, `conv5`). It allows the convolutional features to be computed once for the entire image, making it much more efficient. +The constructor has the following arguments: + * `model` + * `pooling_scales` + * `num_feat_chns` + * `scales`: image scales + * `sz_conv_standard` + * `step_standard` + * `offset0` + * `offset` + * `inputArea` + * `use_cache` + * `cache_dir` + #### Fast-RCNN Similar to SPP, Fast-RCNN also crops the images in the feature space, but instead of keeping the convolutional layers fixed, it trains them together with the fully-connected layers. - +The constructor has the following arguments: + * `scale` + * `max_size` + * `inputArea` ### Batch Provider This class implements sampling strategies for training Object Detectors. In its constructor, it takes as arguments a `DataSetDetection` and a `FeatureProvider`. It implements a `getBatch` function, which samples from the `DataSet` using `FeatureProvider`. +The following arguments are present for all derived classes: + * `DataSetDetection` + * `FeatureProvider` + * `batch_size` + * `fg_fraction` + * `fg_threshold` + * `bg_threshold` + * `do_flip` #### BatchProviderRC ROI-Centric Batch Provider: it samples the patches randomly over the whole pool of patches. +To minimize the number of disk accesses, it reads the data for a specified number of batches and stores it in memory. 
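As a concrete illustration before the options are listed, the RCNN training example later in this series wires the provider up roughly as follows (a sketch; the option values are the example's own and purely illustrative):

```lua
-- Sketch mirroring examples/train_test_rcnn.lua from this series:
-- ds is an nnf.DataSetPascal, fp an nnf.RCNN feature provider,
-- model/criterion a network and a loss defined elsewhere.
bp = nnf.BatchProviderRC{
  dataset        = ds,
  feat_provider  = fp,
  bg_threshold   = {0.0, 0.5}, -- overlap range treated as background
  nTimesMoreData = 2,
  iter_per_batch = 10,         -- batches read per disk pass and kept in memory
}
bp:setupData() -- one-time setup pass over the dataset, as in the example

-- the Trainer consumes the provider during optimization
trainer = nnf.Trainer(model, criterion, bp)
trainer:train(100)
```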
+The constructor takes the following optional arguments: + * `iter_per_batch` + * `nTimesMoreData` #### BatchProviderIC Image-Centric Batch Provider: it first samples a set of images, and then samples a set of patches from those images. +The constructor takes the following optional arguments: + * `imgs_per_batch` ### Examples Here we show a simple example demonstrating how to perform object detection given an image and a set of bounding boxes. diff --git a/SPP.lua b/SPP.lua index a8e9017..89c6081 100644 --- a/SPP.lua +++ b/SPP.lua @@ -48,7 +48,7 @@ local initcheck = argcheck{ {name="inputArea", type="number", default=224^2, - help="force square crops"}, + help="input area"}, {name="image_transformer", type="nnf.ImageTransformer", default=nnf.ImageTransformer{}, diff --git a/examples/train_test_rcnn.lua b/examples/train_test_rcnn.lua index 6916ac0..40ff88b 100644 --- a/examples/train_test_rcnn.lua +++ b/examples/train_test_rcnn.lua @@ -1,19 +1,23 @@ require 'nnf' ---require 'loadcaffe' cmd = torch.CmdLine() cmd:text('Example on how to train/test a RCNN based object detector on Pascal') -cmd:option('-lr',1e-3,'learning rate') -cmd:option('-num_iter',40000,'number of iterations') -cmd:option('-disp_iter',100,'display every n iterations') -cmd:option('-lr_step',30000,'step for reducing the learning rate') -cmd:option('-gpu',1,'gpu to use (0 for cpu mode)') -cmd:option('-name','rcnn-example','base name') +cmd:text('') +cmd:text('Options:') +cmd:option('-name', 'rcnn-example', 'base name') +cmd:option('-lr', 1e-3, 'learning rate') +cmd:option('-num_iter', 40000, 'number of iterations') +cmd:option('-disp_iter', 100, 'display every n iterations') +cmd:option('-lr_step', 30000, 'step for reducing the learning rate') +cmd:option('-gpu', 1, 'gpu to use (0 for cpu mode)') opt = cmd:parse(arg or {}) exp_name = cmd:string(opt.name, opt, {name=true, gpu=true}) +rundir = '../cachedir/'..exp_name +paths.mkdir(rundir) + local tensor_type if opt.gpu > 0 then require 'cunn' @@ -24,16 +28,23 @@ else tensor_type = 'torch.FloatTensor' end +-------------------------------------------------------------------------------- +-- define data +-------------------------------------------------------------------------------- + -- this class holds all the necessary informationn regarding the dataset -ds = nnf.DataSetPascal{image_set='trainval', - datadir='datasets/VOCdevkit', - roidbdir='data/selective_search_data' - } +ds = nnf.DataSetPascal{ + image_set='trainval', + datadir='datasets/VOCdevkit', + roidbdir='data/selective_search_data' +} -- define the transformations to do in the image before -- passing it to the network -local image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, - raw_scale = 255, - swap = {3,2,1}} +local image_transformer= nnf.ImageTransformer{ + mean_pix={102.9801,115.9465,122.7717}, + raw_scale = 255, + swap = {3,2,1} +} -------------------------------------------------------------------------------- -- define feature providers -------------------------------------------------------------------------------- @@ -41,19 +52,23 @@ local image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.77 local crop_size = 224 -- the feature provider extract the features for a given image + bounding box -fp = nnf.RCNN{image_transformer=image_transformer, - crop_size=crop_size} +fp = nnf.RCNN{ + image_transformer=image_transformer, + crop_size=crop_size +} -- different frameworks can behave differently during training and testing fp:training() 
-------------------------------------------------------------------------------- -- define batch providers -------------------------------------------------------------------------------- -bp = nnf.BatchProvider{dataset=ds,feat_provider=fp, - bg_threshold={0.0,0.5}, - nTimesMoreData=2, - iter_per_batch=10,--100, - } +bp = nnf.BatchProviderRC{ + dataset=ds, + feat_provider=fp, + bg_threshold={0.0,0.5}, + nTimesMoreData=2, + iter_per_batch=10,--100, +} bp:setupData() -------------------------------------------------------------------------------- @@ -75,14 +90,24 @@ trainer = nnf.Trainer(model, criterion, bp) local num_iter = opt.num_iter/opt.disp_iter local step_iter = opt.lr_step/opt.disp_iter +trainer.optimState.learningRate = opt.lr + +local lightModel = model:clone('weight','bias') + +-- main training loop for i=1,num_iter do if i % lr_step == 0 then trainer.optimState.learningRate = trainer.optimState.learningRate/10 end print(('Iteration %3d/%-3d'):format(i,num_iter)) trainer:train(opt.disp_iter) + + if i% save_step == 0 then + torch.save(paths.concat(rundir, 'model.t7'), lightModel) + end end +torch.save(paths.concat(rundir, 'model.t7'), lightModel) -------------------------------------------------------------------------------- -- evaluate -------------------------------------------------------------------------------- @@ -93,17 +118,20 @@ softmax:type(tensor_type) model:add(softmax) -- dataset for evaluation -dsv = nnf.DataSetPascal{image_set='test', - datadir='datasets/VOCdevkit', - roidbdir='data/selective_search_data' - } - - -fpv = nnf.RCNN{image_transformer=image_transformer, - crop_size=crop_size} +dsv = nnf.DataSetPascal{ + image_set='test', + datadir='datasets/VOCdevkit', + roidbdir='data/selective_search_data' +} + +-- feature provider for evaluation +fpv = nnf.RCNN{ + image_transformer=image_transformer, + crop_size=crop_size +} fpv:evaluate() -- define the class to test the model on the full dataset -tester = nnf.Tester(model,fpv,dsv) -tester.cachefolder = '../cachedir/'..exp_name +tester = nnf.Tester(model, fpv, dsv) +tester.cachefolder = rundir tester:test(opt.num_iter) From b4ef6061ec11a9cb0c636e6700a1dd57d8a68abf Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 18 Oct 2015 23:46:48 +0200 Subject: [PATCH 56/79] Fix example --- examples/train_test_rcnn.lua | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/examples/train_test_rcnn.lua b/examples/train_test_rcnn.lua index 40ff88b..6665b25 100644 --- a/examples/train_test_rcnn.lua +++ b/examples/train_test_rcnn.lua @@ -9,6 +9,7 @@ cmd:option('-lr', 1e-3, 'learning rate') cmd:option('-num_iter', 40000, 'number of iterations') cmd:option('-disp_iter', 100, 'display every n iterations') cmd:option('-lr_step', 30000, 'step for reducing the learning rate') +cmd:option('-save_step', 10000, 'step for saving the model') cmd:option('-gpu', 1, 'gpu to use (0 for cpu mode)') opt = cmd:parse(arg or {}) @@ -74,7 +75,7 @@ bp:setupData() -------------------------------------------------------------------------------- -- define model and criterion -------------------------------------------------------------------------------- -paths.dofile('../models/rcnn.lua') +paths.dofile('../models/alexnet.lua') model = createModel() criterion = nn.CrossEntropyCriterion() @@ -88,7 +89,8 @@ criterion:type(tensor_type) trainer = nnf.Trainer(model, criterion, bp) local num_iter = opt.num_iter/opt.disp_iter -local step_iter = opt.lr_step/opt.disp_iter +local lr_step = opt.lr_step/opt.disp_iter +local save_step = 
opt.save_step/opt.disp_iter trainer.optimState.learningRate = opt.lr From e66b53299b665e07e893a5eafb57820fbd9e59b7 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Mon, 19 Oct 2015 00:09:30 +0200 Subject: [PATCH 57/79] Tweak example --- examples/train_test_rcnn.lua | 40 +++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/examples/train_test_rcnn.lua b/examples/train_test_rcnn.lua index 6665b25..4aeb0bc 100644 --- a/examples/train_test_rcnn.lua +++ b/examples/train_test_rcnn.lua @@ -11,6 +11,8 @@ cmd:option('-disp_iter', 100, 'display every n iterations') cmd:option('-lr_step', 30000, 'step for reducing the learning rate') cmd:option('-save_step', 10000, 'step for saving the model') cmd:option('-gpu', 1, 'gpu to use (0 for cpu mode)') +cmd:option('-seed', 1, 'fix random seed (if ~= 0)') +cmd:option('-numthreads',6, 'number of threads') opt = cmd:parse(arg or {}) @@ -19,6 +21,9 @@ exp_name = cmd:string(opt.name, opt, {name=true, gpu=true}) rundir = '../cachedir/'..exp_name paths.mkdir(rundir) +cmd:log(paths.concat(rundir,'log'), opt) +cmd:addTime('RCNN Example') + local tensor_type if opt.gpu > 0 then require 'cunn' @@ -29,8 +34,17 @@ else tensor_type = 'torch.FloatTensor' end +if opt.seed ~= 0 then + torch.manualSeed(opt.seed) + if opt.gpu > 0 then + cutorch.manualSeed(opt.seed) + end +end + +torch.setnumthreads(opt.numthreads) + -------------------------------------------------------------------------------- --- define data +-- define data for training -------------------------------------------------------------------------------- -- this class holds all the necessary information regarding the dataset @@ -39,6 +53,8 @@ ds = nnf.DataSetPascal{ datadir='datasets/VOCdevkit', roidbdir='data/selective_search_data' } +print('DataSet Training:') +print(ds) -- define the transformations to do in the image before -- passing it to the network local image_transformer= nnf.ImageTransformer{ @@ -46,6 +62,7 @@ local image_transformer= nnf.ImageTransformer{ raw_scale = 255, swap = {3,2,1} } + -------------------------------------------------------------------------------- -- define feature providers -------------------------------------------------------------------------------- @@ -59,6 +76,10 @@ fp = nnf.RCNN{ } -- different frameworks can behave differently during training and testing fp:training() + +print('Feature Provider:') +print(fp) + -------------------------------------------------------------------------------- -- define batch providers -------------------------------------------------------------------------------- @@ -72,6 +93,8 @@ bp = nnf.BatchProviderRC{ } bp:setupData() +print('Batch Provider:') +print(bp) -------------------------------------------------------------------------------- -- define model and criterion -------------------------------------------------------------------------------- @@ -82,6 +105,12 @@ criterion = nn.CrossEntropyCriterion() model:type(tensor_type) criterion:type(tensor_type) + +print('Model:') +print(model) +print('Criterion:') +print(criterion) + -------------------------------------------------------------------------------- -- train -------------------------------------------------------------------------------- @@ -103,6 +132,7 @@ for i=1,num_iter do end print(('Iteration %3d/%-3d'):format(i,num_iter)) trainer:train(opt.disp_iter) + print((' Training error: %.5f'):format(trainer.fx[i])) if i% save_step == 0 then torch.save(paths.concat(rundir, 'model.t7'), lightModel) @@ -110,10 +140,10 @@ for
i=1,num_iter do end torch.save(paths.concat(rundir, 'model.t7'), lightModel) + -------------------------------------------------------------------------------- --- evaluate +-- evaluation -------------------------------------------------------------------------------- - -- add softmax to classfier, because we were using nn.CrossEntropyCriterion local softmax = nn.SoftMax() softmax:type(tensor_type) @@ -125,6 +155,8 @@ dsv = nnf.DataSetPascal{ datadir='datasets/VOCdevkit', roidbdir='data/selective_search_data' } +print('DataSet Evaluation:') +print(dsv) -- feature provider for evaluation fpv = nnf.RCNN{ @@ -132,6 +164,8 @@ fpv = nnf.RCNN{ crop_size=crop_size } fpv:evaluate() +print('Feature Provider Evaluation:') +print(fpv) -- define the class to test the model on the full dataset tester = nnf.Tester(model, fpv, dsv) From ddabe6b749fd07df61f0d56eb7d8b09bde42bc38 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Mon, 19 Oct 2015 07:52:12 +0200 Subject: [PATCH 58/79] Doc tweak --- README.md | 16 ++++++++++------ visualize_detections.lua | 2 +- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 161b99f..d0f1f17 100644 --- a/README.md +++ b/README.md @@ -88,7 +88,7 @@ The constructor take the following optional arguments: ### Examples Here we show a simple example demonstrating how to perform object detection given an image and a set of bounding boxes. - +Run it using `qlua` for the visualization part. ```lua require 'nnf' require 'image' @@ -96,19 +96,23 @@ require 'nn' model = torch.load('model.t7') I = image.lena() -bboxes = {1,1,200,200} +-- generate some random bounding boxes +bboxes = torch.Tensor(100,4) +bboxes:select(2,1):random(1,I:size(3)/2) +bboxes:select(2,2):random(1,I:size(2)/2) +bboxes:select(2,3):random(I:size(3)/2+1,I:size(3)) +bboxes:select(2,4):random(I:size(2)/2+1,I:size(2)) image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, raw_scale = 255, swap = {3,2,1}} feat_provider = nnf.RCNN{crop_size=227,image_transformer=image_transformer} --- the following could also be done by creating an instance of ImageDetect --- and calling :detect(I,boxes) -feats = feat_provider:getFeature(I,bboxes) -scores = feat_provider:compute(model,feats) +detector = nnf.ImageDetect(model, feat_provider) +scores, boxes = detector:detect(I, boxes) -- visualization +dofile 'visualize_detections.lua' threshold = 0.5 visualize_detections(I,bboxes,scores,threshold) diff --git a/visualize_detections.lua b/visualize_detections.lua index 19c5539..63924eb 100644 --- a/visualize_detections.lua +++ b/visualize_detections.lua @@ -19,7 +19,7 @@ function visualize_detections(im,boxes,scores,thresh,cl_names) local rr = r[idx_thresh] local boxes_thresh = boxes:index(1,rr) - local keep = nms(torch.cat(boxes_thresh:float(),max_score,2),0.3) + local keep = nms(torch.cat(boxes_thresh:float(),max_score:float(),2),0.3) boxes_thresh = boxes_thresh:index(1,keep) max_score = max_score:index(1,keep) From cfba0e8061059a994703e1dc8e9ca02ef0f356f9 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Mon, 19 Oct 2015 08:05:21 +0200 Subject: [PATCH 59/79] Fix readme --- README.md | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index d0f1f17..698c793 100644 --- a/README.md +++ b/README.md @@ -109,7 +109,7 @@ image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, feat_provider = nnf.RCNN{crop_size=227,image_transformer=image_transformer} detector = nnf.ImageDetect(model, 
feat_provider) -scores, boxes = detector:detect(I, boxes) +scores, bboxes = detector:detect(I, bboxes) -- visualization dofile 'visualize_detections.lua' @@ -123,6 +123,22 @@ For an illustration on how to use this code to train a detector, or to evaluate it on Pascal, see the [examples](http://github.com/fmassa/object-detection.torch/tree/master/examples). #### Bounding box proposals Note that this repo doesn't contain code for generating bounding box proposals. For the moment, they are pre-computed and loaded at run time. +#### Model definition +All the detection frameworks implemented here suppose that you already have a pre-trained classification network (trained, for example, on ImageNet). They reuse this pre-trained network as an initialization for the subsequent fine-tuning. + +In `models/` you will find the model definition for several classic networks used in object detection. + +The zeiler pretrained model is available at [https://drive.google.com/open?id=0B-TTdm1WNtybdzdMUHhLc05PSE0&authuser=0](https://drive.google.com/open?id=0B-TTdm1WNtybdzdMUHhLc05PSE0&authuser=0). +It is supposed to be at `data/models`. +If you want to use your own model in the SPP framework, make sure that it follows the pattern +``` +model = nn.Sequential() +model:add(features) +model:add(pooling_layer) +model:add(classifier) +``` +where `features` can be a `nn.Sequential` of several convolutions and `pooling_layer` is the last pooling with reshaping of the data to feed it to the classifier. See `models/zeiler.lua` for an example. ### Dependencies It requires the following packages @@ -160,18 +176,6 @@ First, clone this repo git clone https://github.com/fmassa/object-detection.torch.git ``` -The zeiler pretrained model is available at [https://drive.google.com/open?id=0B-TTdm1WNtybdzdMUHhLc05PSE0&authuser=0](https://drive.google.com/open?id=0B-TTdm1WNtybdzdMUHhLc05PSE0&authuser=0). -It is supposed to be at `data/models`. -If you want to use your own model in SPP framework, make sure that it follows the pattern -``` -model = nn.Sequential() -model:add(features) -model:add(pooling_layer) -model:add(classifier) -``` -where `features` can be a `nn.Sequential` of several convolutions and `pooling_layer` is the last pooling with reshaping of the data to feed it to the classifer. See `models/zeiler.lua` for an example. - - The default is to consider that the dataset is present in `datasets/VOCdevkit/VOC2007/`. The default location of bounding boxes `.mat` files (in RCNN format) is supposed to be in `data/selective_search_data/`.
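For reference, a minimal sketch of pointing the dataset class at these default locations (mirroring the constructor calls used in `examples/train_test_rcnn.lua`):

```lua
require 'nnf'

-- assumes the VOC2007 data and the selective search .mat files
-- sit in the default locations mentioned above
ds = nnf.DataSetPascal{
  image_set = 'trainval',
  datadir   = 'datasets/VOCdevkit',
  roidbdir  = 'data/selective_search_data',
  year      = 2007
}
print(('Loaded %d training images'):format(ds:size()))
```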
From 7d5950a5becff8d1d2e2b9910881bea3c1898537 Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Mon, 9 Nov 2015 20:07:54 +0100 Subject: [PATCH 60/79] Add threads to RCNN Speeds up bounding box processing time --- RCNN.lua | 150 ++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 98 insertions(+), 52 deletions(-) diff --git a/RCNN.lua b/RCNN.lua index 5af8200..310b190 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -1,4 +1,3 @@ -local argcheck = require 'argcheck' local flipBoundingBoxes = paths.dofile('utils.lua').flipBoundingBoxes local argcheck = require 'argcheck' @@ -25,6 +24,14 @@ local initcheck = argcheck{ type="number", default=128, help="maximum size of batches during evaluation"}, + {name="num_threads", + type="number", + default=8, + help="number of threads for bounding box cropping"}, + {name="iter_per_thread", + type="number", + default=8, + help="number of bbox croppings per thread"}, {name="dataset", type="nnf.DataSetPascal", -- change to allow other datasets opt=true, @@ -35,41 +42,12 @@ local initcheck = argcheck{ local RCNN = torch.class('nnf.RCNN') RCNN._isFeatureProvider = true -function RCNN:__init(...) --- self.image_transformer = nnf.ImageTransformer{ --- mean_pix={123.68/255,116.779/255,103.939/255}} - - self.image_mean = nil - - local opts = initcheck(...) - for k,v in pairs(opts) do self[k] = v end - - self.output_size = {3,self.crop_size,self.crop_size} - self.train = true -end - -function RCNN:training() - self.train = true -end - -function RCNN:evaluate() - self.train = false -end - -function RCNN:getCrop(output,I,bbox) - -- suppose I is in BGR, as image_mean - -- [x1 y1 x2 y2] order - - local crop_size = self.crop_size - local image_mean = self.image_mean - local padding = self.padding - local use_square = self.use_square - +local function RCNNCrop(output,I,box,crop_size,padding,use_square,crop_buffer) local pad_w = 0; local pad_h = 0; local crop_width = crop_size; local crop_height = crop_size; - + local bbox = {box[1],box[2],box[3],box[4]} ------ if padding > 0 or use_square then local scale = crop_size/(crop_size - padding*2) @@ -121,28 +99,62 @@ function RCNN:getCrop(output,I,bbox) ------ local patch = I[{{},{bbox[2],bbox[4]},{bbox[1],bbox[3]}}] - self._crop = self._crop or torch.FloatTensor(3,self.crop_size,self.crop_size) - self._crop:resize(3,crop_height,crop_width) - image.scale(self._crop,patch,'bilinear'); - local tmp = self._crop - - if image_mean then - tmp:add(-1,image_mean[{{},{pad_h+1,pad_h+crop_height}, - {pad_w+1,pad_w+crop_width}}]) - end - - output[{{},{pad_h+1,pad_h+crop_height}, {pad_w+1,pad_w+crop_width}}] = tmp + crop_buffer:resize(3,crop_height,crop_width) + image.scale(crop_buffer,patch,'bilinear'); - return output + output[{{},{pad_h+1,pad_h+crop_height}, {pad_w+1,pad_w+crop_width}}] = crop_buffer end -function RCNN:getFeature(im_idx,bbox,flip) - local flip = flip==nil and false or flip - - local crop_feat = self:getCrop(im_idx,bbox,flip) + +function RCNN:__init(...) - return crop_feat + local opts = initcheck(...) 
+ for k,v in pairs(opts) do self[k] = v end + + self.output_size = {3,self.crop_size,self.crop_size} + self.train = true + + if self.num_threads > 1 then + local crop_size = self.crop_size + local threads = require 'threads' + threads.serialization('threads.sharedserialize') + self.donkeys = threads.Threads( + self.num_threads, + function() + require 'torch' + require 'image' + end, + function(idx) + RCNNCrop = RCNNCrop + torch.setheaptracking(true) + crop_buffer = torch.FloatTensor(3,crop_size,crop_size) + print(string.format('Starting RCNN thread with id: %d', idx)) + end + ) + end +end + +function RCNN:training() + self.train = true +end + +function RCNN:evaluate() + self.train = false +end + +function RCNN:getCrop(output,I,bbox) + -- [x1 y1 x2 y2] order + + local crop_size = self.crop_size + local padding = self.padding + local use_square = self.use_square + + self._crop_buffer = self._crop_buffer or torch.FloatTensor(3,crop_size,crop_size) + RCNNCrop(output,I,bbox,crop_size,padding,use_square,self._crop_buffer) + + return output + end function RCNN:getFeature(im,bbox,flip) @@ -169,9 +181,43 @@ function RCNN:getFeature(im,bbox,flip) self._feat:resize(num_boxes,table.unpack(self.output_size)):zero() - -- use threads to make it faster - for i=1,num_boxes do - self:getCrop(self._feat[i],im,bbox[i]) + -- use threads to speed up bbox processing + if self.num_threads > 1 and num_boxes > self.iter_per_thread then + local feat = self._feat + local img = im + local bndbox = bbox + local crop_size = self.crop_size + local padding = self.padding + local use_square = self.use_square + local iter_per_thread = self.iter_per_thread + local num_launches = math.ceil(num_boxes/iter_per_thread) + for i=1,num_launches do + local iter_per_thread_local + if i == num_launches then + -- last thread launches the remainder of the bboxes + iter_per_thread_local = (num_boxes-1)%iter_per_thread + 1 + else + iter_per_thread_local = iter_per_thread + end + self.donkeys:addjob( + function() + for j=1,iter_per_thread_local do + local f = feat[(i-1)*iter_per_thread+j] + local boundingbox = bndbox[(i-1)*iter_per_thread+j] + -- crop_buffer is global in each thread + RCNNCrop(f,img,boundingbox,crop_size,padding,use_square,crop_buffer) + end + --collectgarbage() + return + end + ) + end + self.donkeys:synchronize() + + else + for i=1,num_boxes do + self:getCrop(self._feat[i],im,bbox[i]) + end end return self._feat From a84e16a4babac06c6a2654bce0d59cc66086119d Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Mon, 9 Nov 2015 20:28:36 +0100 Subject: [PATCH 61/79] Force image to be float in RCNN --- RCNN.lua | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/RCNN.lua b/RCNN.lua index 310b190..8536cf6 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -164,6 +164,14 @@ function RCNN:getFeature(im,bbox,flip) assert(self.dataset, 'you must provide a dataset if using numeric indices') im = self.dataset:getImage(im) end + + if torch.type(im) ~= 'torch.FloatTensor' then + -- force image to be float + self._im = self._im or torch.FloatTensor() + self._im:resize(im:size()):copy(im) + im = self._im + end + if type(bbox) == 'table' then bbox = torch.FloatTensor(bbox) end From 8c906e926ede1a717fe68d1388968053537360bc Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Tue, 10 Nov 2015 12:33:33 +0100 Subject: [PATCH 62/79] Avoid modifying original bbox in RCNN SPP --- FRCNN.lua | 1 + RCNN.lua | 6 ++++++ SPP.lua | 6 ++++++ 3 files changed, 13 insertions(+) diff --git a/FRCNN.lua b/FRCNN.lua index 064c8c3..248b2d9 100644 --- a/FRCNN.lua +++ 
b/FRCNN.lua @@ -118,6 +118,7 @@ function FRCNN:projectImageROIs(im_rois,scales,do_flip,imgs_size) end end else -- not yet tested + error('Multi-scale testing not yet tested') local scales = torch.FloatTensor(scales) im_rois = im_rois[1] local widths = im_rois[{{},3}] - im_rois[{{},1}] + 1 diff --git a/RCNN.lua b/RCNN.lua index 8536cf6..2dfa36e 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -174,6 +174,12 @@ function RCNN:getFeature(im,bbox,flip) if type(bbox) == 'table' then bbox = torch.FloatTensor(bbox) + elseif torch.isTensor(bbox) and flip then + -- creates a copy of the bboxes to avoid modifying the original + -- bboxes in the flipping + self._bbox = self._bbox or torch.FloatTensor() + self._bbox:resize(bbox:size()):copy(bbox) + bbox = self._bbox end im = self.image_transformer:preprocess(im) diff --git a/SPP.lua b/SPP.lua index 89c6081..13b0851 100644 --- a/SPP.lua +++ b/SPP.lua @@ -140,6 +140,12 @@ function SPP:getCrop(im_idx,bbox,flip) if type(bbox) == 'table' then bbox = torch.FloatTensor(bbox) + elseif torch.isTensor(bbox) and flip then + -- creates a copy of the bboxes to avoid modifying the original + -- bboxes in the flipping + self._bbox = self._bbox or torch.FloatTensor() + self._bbox:resize(bbox:size()):copy(bbox) + bbox = self._bbox end bbox = bbox:dim() == 1 and bbox:view(1,-1) or bbox From 0962e7037e832473e63ea71ca7ef522d0a6610fc Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sat, 14 Nov 2015 17:44:47 +0100 Subject: [PATCH 63/79] Updating readme and examples with threads --- README.md | 4 ++- examples/train_test_rcnn.lua | 56 +++++++++++++++++++----------------- 2 files changed, 32 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index 698c793..1902879 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,7 @@ The constructor has the following arguments: * `crop_size` * `padding` * `use_square` + * `num_threads` number of parallel threads #### SPP @@ -106,7 +107,8 @@ bboxes:select(2,4):random(I:size(2)/2+1,I:size(2)) image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, raw_scale = 255, swap = {3,2,1}} -feat_provider = nnf.RCNN{crop_size=227,image_transformer=image_transformer} +feat_provider = nnf.RCNN{crop_size=227,image_transformer=image_transformer, + num_threads=6} detector = nnf.ImageDetect(model, feat_provider) scores, bboxes = detector:detect(I, bboxes) diff --git a/examples/train_test_rcnn.lua b/examples/train_test_rcnn.lua index 4aeb0bc..9b0295f 100644 --- a/examples/train_test_rcnn.lua +++ b/examples/train_test_rcnn.lua @@ -43,6 +43,30 @@ end torch.setnumthreads(opt.numthreads) +-------------------------------------------------------------------------------- +-- define model and criterion +-------------------------------------------------------------------------------- +paths.dofile('../models/alexnet.lua') +model = createModel() + +criterion = nn.CrossEntropyCriterion() + +model:type(tensor_type) +criterion:type(tensor_type) + +print('Model:') +print(model) +print('Criterion:') +print(criterion) + +-- define the transformations to do in the image before +-- passing it to the network +local image_transformer= nnf.ImageTransformer{ + mean_pix={102.9801,115.9465,122.7717}, + raw_scale = 255, + swap = {3,2,1} +} + -------------------------------------------------------------------------------- -- define data for training -------------------------------------------------------------------------------- @@ -55,14 +79,6 @@ ds = nnf.DataSetPascal{ } print('DataSet Training:') print(ds) --- define the transformations to do in the 
image before --- passing it to the network -local image_transformer= nnf.ImageTransformer{ - mean_pix={102.9801,115.9465,122.7717}, - raw_scale = 255, - swap = {3,2,1} -} - -------------------------------------------------------------------------------- -- define feature providers -------------------------------------------------------------------------------- @@ -72,7 +88,8 @@ local crop_size = 224 -- the feature provider extract the features for a given image + bounding box fp = nnf.RCNN{ image_transformer=image_transformer, - crop_size=crop_size + crop_size=crop_size, + num_threads=opt.numthreads } -- different frameworks can behave differently during training and testing fp:training() @@ -95,22 +112,6 @@ bp:setupData() print('Batch Provider:') print(bp) --------------------------------------------------------------------------------- --- define model and criterion --------------------------------------------------------------------------------- -paths.dofile('../models/alexnet.lua') -model = createModel() - -criterion = nn.CrossEntropyCriterion() - -model:type(tensor_type) -criterion:type(tensor_type) - -print('Model:') -print(model) -print('Criterion:') -print(criterion) - -------------------------------------------------------------------------------- -- train -------------------------------------------------------------------------------- @@ -144,7 +145,7 @@ torch.save(paths.concat(rundir, 'model.t7'), lightModel) -------------------------------------------------------------------------------- -- evaluation -------------------------------------------------------------------------------- --- add softmax to classfier, because we were using nn.CrossEntropyCriterion +-- add softmax to classifier, because we were using nn.CrossEntropyCriterion local softmax = nn.SoftMax() softmax:type(tensor_type) model:add(softmax) @@ -161,7 +162,8 @@ print(dsv) -- feature provider for evaluation fpv = nnf.RCNN{ image_transformer=image_transformer, - crop_size=crop_size + crop_size=crop_size, + num_threads=opt.numthreads } fpv:evaluate() print('Feature Provider Evaluation:') From a8479f40da97dad53447ffc7b2adce259afd0f57 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 15 Nov 2015 14:33:13 +0100 Subject: [PATCH 64/79] Remove old Tester --- Tester_depreceated.lua | 211 ----------------------------------------- 1 file changed, 211 deletions(-) delete mode 100644 Tester_depreceated.lua diff --git a/Tester_depreceated.lua b/Tester_depreceated.lua deleted file mode 100644 index 4c84ace..0000000 --- a/Tester_depreceated.lua +++ /dev/null @@ -1,211 +0,0 @@ -local utils = paths.dofile('utils.lua') -local nms = paths.dofile('nms.lua') - -local keep_top_k = utils.keep_top_k -local VOCevaldet = utils.VOCevaldet - -local Tester = torch.class('nnf.Tester') - -function Tester:__init(module,feat_provider) - self.dataset = feat_provider.dataset - self.module = module - self.feat_provider = feat_provider - - self.feat_dim = {256*50} - self.max_batch_size = 4000 - - self.cachefolder = nil - self.cachename = nil - self.suffix = '' - self.verbose = true -end - --- improve it ! 
-function Tester:validate(criterion) - - local tname = paths.concat(self.cachefolder,self.cachename) - local valData - if paths.filep(tname) then - valData = torch.load(tname) - else - -- batch_provider need to be set before - valData = {} - valData.inputs,valData.targets = self.batch_provider:getBatch() - torch.save(tname,valData) - self.batch_provider = nil - end - - local num_batches = valData.inputs:size(1) - local module = self.module - - local err = 0 - local inputs = torch.CudaTensor() - local targets = torch.CudaTensor() - for t=1,num_batches do - xlua.progress(t,num_batches) - - inputs:resize(valData.inputs[t]:size()):copy(valData.inputs[t]) - targets:resize(valData.targets[t]:size()):copy(valData.targets[t]) - - local output = module:forward(inputs) - - err = err + criterion:forward(output,targets) - end - - valData = nil - collectgarbage() - - return err/num_batches -end - -function Tester:test(iteration) - - local dataset = self.dataset - local module = self.module - local feat_provider = self.feat_provider - - local pathfolder = paths.concat(self.cachefolder,'test_iter'..iteration) - paths.mkdir(pathfolder) - - module:evaluate() - dataset:loadROIDB() - - local feats = torch.FloatTensor() - local feats_batched = {} - local feats_cuda = torch.CudaTensor() - - local output = torch.FloatTensor() - - local output_dim = module:get(module:size()) - - local softmax = nn.SoftMax():float() - - local boxes - -- - local aboxes = {} - for i=1,dataset.num_classes do - table.insert(aboxes,{}) - end - - local max_per_set = 5*dataset:size() - local max_per_image = 100 - local thresh = torch.ones(dataset.num_classes):mul(-1.5) - local scored_boxes = torch.FloatTensor() - - local timer = torch.Timer() - local timer2 = torch.Timer() - local timer3 = torch.Timer() - - for i=1,dataset:size() do - timer:reset() - io.write(('test: (%s) %5d/%-5d '):format(dataset.dataset_name,i,dataset:size())); - boxes = dataset:getROIBoxes(i):float() - local num_boxes = boxes:size(1) - -- compute image feature maps - timer3:reset() - feats:resize(num_boxes,unpack(self.feat_dim)) - for idx=1,num_boxes do - feats[idx] = feat_provider:getFeature(i,boxes[idx]) - end - local tt = timer3:time().real - -- compute classification scores - torch.split(feats_batched,feats,self.max_batch_size,1) - timer3:reset() - for idx,f in ipairs(feats_batched) do - local fs = f:size(1) - feats_cuda:resize(fs,unpack(self.feat_dim)):copy(f) - module:forward(feats_cuda) - if idx == 1 then - local out_size = module.output:size():totable() - table.remove(out_size,1) - output:resize(num_boxes,unpack(out_size)) - end - output:narrow(1,(idx-1)*self.max_batch_size+1,fs):copy(module.output) - end - local add_bg = 0 - if dataset.num_classes ~= output:size(2) then -- if there is no svm - output = softmax:forward(output) - add_bg = 1 - end - - local tt2 = timer3:time().real - - timer2:reset() - for j=1,dataset.num_classes do - local scores = output:select(2,j+add_bg) - local idx = torch.range(1,scores:numel()):long() - local idx2 = scores:gt(thresh[j]) - idx = idx[idx2] - scored_boxes:resize(idx:numel(),5) - if scored_boxes:numel() > 0 then - scored_boxes:narrow(2,1,4):index(boxes,1,idx) - scored_boxes:select(2,5):copy(scores[idx2]) - end - local keep = nms(scored_boxes,0.3) - if keep:numel()>0 then - local _,ord = torch.sort(scored_boxes:select(2,5):index(1,keep),true) - ord = ord:narrow(1,1,math.min(ord:numel(),max_per_image)) - keep = keep:index(1,ord) - aboxes[j][i] = scored_boxes:index(1,keep) - else - aboxes[j][i] = torch.FloatTensor() - end - - 
if i%1000 == 0 then - aboxes[j],thresh[j] = keep_top_k(aboxes[j],max_per_set) - end - - end - - io.write((' prepare feat time: %.3f, forward time: %.3f, select time: %.3fs, total time: %.3fs\n'):format(tt,tt2,timer2:time().real,timer:time().real)); - --collectgarbage() - --mattorch.save(paths.concat(pathfolder,dataset.img_ids[i]..'.mat'),output:double()) - end - - for i = 1,dataset.num_classes do - -- go back through and prune out detections below the found threshold - for j = 1,dataset:size() do - if aboxes[i][j]:numel() > 0 then - local I = aboxes[i][j]:select(2,5):lt(thresh[i]) - local idx = torch.range(1,aboxes[i][j]:size(1)):long() - idx = idx[I] - if idx:numel()>0 then - aboxes[i][j] = aboxes[i][j]:index(1,idx) - end - end - end - save_file = paths.concat(pathfolder, dataset.classes[i].. '_boxes_'.. - dataset.dataset_name..self.suffix) - torch.save(save_file, aboxes) - end - - local res = {} - for i=1,dataset.num_classes do - local cls = dataset.classes[i] - res[i] = VOCevaldet(dataset,aboxes[i],cls) - end - res = torch.Tensor(res) - print('Results:') - -- print class names - io.write('|') - for i = 1, dataset.num_classes do - io.write(('%5s|'):format(dataset.classes[i])) - end - io.write('\n|') - -- print class scores - for i = 1, dataset.num_classes do - local l = #dataset.classes[i] < 5 and 5 or #dataset.classes[i] - local l = res[i] == res[i] and l-5 or l-3 - if l > 0 then - io.write(('%.3f%'..l..'s|'):format(res[i],' ')) - else - io.write(('%.3f|'):format(res[i])) - end - end - io.write('\n') - io.write(('mAP: %.4f\n'):format(res:mean(1)[1])) - - -- clean roidb to free memory - dataset.roidb = nil - return res -end From c0d474e7db2600ad43155f8dbb20e46b8d7076bc Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 15 Nov 2015 15:25:54 +0100 Subject: [PATCH 65/79] Adding pretty-print to feature transformers --- FRCNN.lua | 9 ++++++++- ImageTransformer.lua | 9 +++++++++ RCNN.lua | 10 ++++++++++ README.md | 3 ++- SPP.lua | 8 ++++++++ examples/train_test_rcnn.lua | 14 ++++++++++---- utils.lua | 1 + 7 files changed, 48 insertions(+), 6 deletions(-) diff --git a/FRCNN.lua b/FRCNN.lua index 248b2d9..9947127 100644 --- a/FRCNN.lua +++ b/FRCNN.lua @@ -18,7 +18,7 @@ local initcheck = argcheck{ {name="inputArea", type="number", default=224^2, - help="force square crops"}, + help="input area of the bounding box"}, {name="image_transformer", type="nnf.ImageTransformer", default=nnf.ImageTransformer{}, @@ -176,3 +176,10 @@ function FRCNN:compute(model, inputs) return model:forward(self.inputs) end +function FRCNN:__tostring() + local str = torch.type(self) + str = str .. '\n Image scales: [' .. table.concat(self.scale,', ')..']' + str = str .. '\n Max image size: ' .. self.max_size + str = str .. '\n Input area: ' .. self.inputArea + return str +end diff --git a/ImageTransformer.lua b/ImageTransformer.lua index d7b213b..3bdb175 100644 --- a/ImageTransformer.lua +++ b/ImageTransformer.lua @@ -37,3 +37,12 @@ function ImageTransformer:preprocess(I) return I end +function ImageTransformer:__tostring() + local str = torch.type(self) + if self.swap then + str = str .. '\n Channel swap: [' .. table.concat(self.swap,', ') .. ']' + end + str = str .. '\n Raw scale: '.. self.raw_scale + str = str .. '\n Mean pixel: [' .. table.concat(self.mean_pix,', ') .. 
']' + return str +end diff --git a/RCNN.lua b/RCNN.lua index 2dfa36e..13b87a9 100644 --- a/RCNN.lua +++ b/RCNN.lua @@ -262,3 +262,13 @@ function RCNN:compute(model,inputs) end return self.output end + +function RCNN:__tostring() + local str = torch.type(self) + str = str .. '\n Crop size: ' .. self.crop_size + str = str .. '\n Context padding: ' .. self.padding + if self.use_square then + str = str .. '\n Use square: true' + end + return str +end diff --git a/README.md b/README.md index 1902879..843eac3 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ This library aims to provide a simple architecture to easily perform object detection in [torch](http://torch.ch). It currently contains code for training the following frameworks: [RCNN](http://arxiv.org/abs/1311.2524), [SPP](http://arxiv.org/abs/1406.4729) and [Fast-RCNN](http://arxiv.org/abs/1504.08083). -It consists of 6 basic classes: +It consists of 7 basic classes: * ImageTransformer: Preprocess an image before feeding it to the network * DataSetDetection: Generic dataset class for object detection. @@ -17,6 +17,7 @@ It consists of 6 basic classes: * [BatchProviderRC](#batch_provider_rc): ROI-Centric * [BatchProviderIC](#batch_provider_ic): Image-Centric * ImageDetect: Encapsulates a model and a feature provider to perform the detection +* Trainer: Simple class to perform the model training. * Tester: Evaluate the detection using Pascal VOC approach. diff --git a/SPP.lua b/SPP.lua index 13b0851..9f270e8 100644 --- a/SPP.lua +++ b/SPP.lua @@ -533,3 +533,11 @@ end function SPP:cuda() return self:type('torch.CudaTensor') end + +function SPP:__tostring() + local str = torch.type(self) + str = str .. '\n Image scales: [' .. table.concat(self.scales,', ')..']' + str = str .. '\n Input area: ' .. 
self.inputArea + return str +end + diff --git a/examples/train_test_rcnn.lua b/examples/train_test_rcnn.lua index 9b0295f..9846d6a 100644 --- a/examples/train_test_rcnn.lua +++ b/examples/train_test_rcnn.lua @@ -16,7 +16,7 @@ cmd:option('-numthreads',6, 'number of threads') opt = cmd:parse(arg or {}) -exp_name = cmd:string(opt.name, opt, {name=true, gpu=true}) +exp_name = cmd:string(opt.name, opt, {name=true, gpu=true, numthreads=true}) rundir = '../cachedir/'..exp_name paths.mkdir(rundir) @@ -29,9 +29,11 @@ if opt.gpu > 0 then require 'cunn' cutorch.setDevice(opt.gpu) tensor_type = 'torch.CudaTensor' + print('Using GPU mode on device '..opt.gpu) else require 'nn' tensor_type = 'torch.FloatTensor' + print('Using CPU mode') end if opt.seed ~= 0 then @@ -39,6 +41,7 @@ if opt.seed ~= 0 then if opt.gpu > 0 then cutorch.manualSeed(opt.seed) end + print('Using fixed seed: '..opt.seed) end torch.setnumthreads(opt.numthreads) @@ -46,7 +49,7 @@ torch.setnumthreads(opt.numthreads) -------------------------------------------------------------------------------- -- define model and criterion -------------------------------------------------------------------------------- -paths.dofile('../models/alexnet.lua') +local createModel = paths.dofile('../models/alexnet.lua') model = createModel() criterion = nn.CrossEntropyCriterion() @@ -67,6 +70,7 @@ local image_transformer= nnf.ImageTransformer{ swap = {3,2,1} } +print(image_transformer) -------------------------------------------------------------------------------- -- define data for training -------------------------------------------------------------------------------- @@ -75,7 +79,8 @@ local image_transformer= nnf.ImageTransformer{ ds = nnf.DataSetPascal{ image_set='trainval', datadir='datasets/VOCdevkit', - roidbdir='data/selective_search_data' + roidbdir='data/selective_search_data', + year=2007 } print('DataSet Training:') print(ds) @@ -154,7 +159,8 @@ model:add(softmax) dsv = nnf.DataSetPascal{ image_set='test', datadir='datasets/VOCdevkit', - roidbdir='data/selective_search_data' + roidbdir='data/selective_search_data', + year=2007 } print('DataSet Evaluation:') print(dsv) diff --git a/utils.lua b/utils.lua index 20bdf1c..689c00f 100644 --- a/utils.lua +++ b/utils.lua @@ -2,6 +2,7 @@ -- utility functions for the evaluation part -------------------------------------------------------------------------------- +-- can be replaced by the new torch.cat function local function joinTable(input,dim) local size = torch.LongStorage() local is_ok = false From d1075c6c6bbb5a8aa7d69a624eae52c7558c0d90 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 15 Nov 2015 15:51:47 +0100 Subject: [PATCH 66/79] Doc tweak --- README.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/README.md b/README.md index 843eac3..274b5e2 100644 --- a/README.md +++ b/README.md @@ -59,6 +59,32 @@ The constructor has the following arguments: * `scale` * `max_size` * `inputArea` +The output of `getFeature()` is a table with two entries: the preprocessed image (or images) as the first element, and the projected bounding boxes as the second. An example of a CNN model structure which can be used with Fast-RCNN is as follows: +```lua +-- define features and classifier as you wish.
+-- Can use loadcaffe to read from a saved model, for example +features = torch.load('alexnet_features.t7') +classifier = torch.load('alexnet_classifier.t7') + +-- define the ROIPooling layer +-- can use either inn.ROIPooling or nnf.ROIPooling (with CPU support) +-- let's just use standard parameters from Fast-RCNN paper +local ROIPooling = inn.ROIPooling(6,6):setSpatialScale(1/16) + +-- create parallel model which takes as input the images and +-- bounding boxes, and pass the images through the convolutional +-- features and simply copy the bounding boxes +local prl = nn.ParallelTable() +prl:add(features) +prl:add(nn.Identity()) + +-- this is the final model +model = nn.Sequential() +model:add(prl) +model:add(ROIPooling) +model:add(nn.View(-1):setNumInputDims(3)) +model:add(classifier) +``` ### Batch Provider From 91691c96f8de24d4a0a82aaf757ac77447c52469 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 15 Nov 2015 15:54:11 +0100 Subject: [PATCH 67/79] Doc fix --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 274b5e2..e701f35 100644 --- a/README.md +++ b/README.md @@ -59,6 +59,7 @@ The constructor has the following arguments: * `scale` * `max_size` * `inputArea` + The output of `getFeature()` is a table with two entries: the preprocessed image (or images) as the first element, and the projected bounding boxes as the second. An example of a CNN model structure which can be used with Fast-RCNN is as follows: ```lua -- define features and classifier as you wish. From 8b86477c750fa8f8068e0f888280f5697c054997 Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 15 Nov 2015 19:12:09 +0100 Subject: [PATCH 68/79] Updating SPP with cache function --- README.md | 4 +++- SPP.lua | 31 +++++++++++++++++++++++++++++-- 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index e701f35..4ddeb20 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ It consists of 7 basic classes: ### Feature Provider The `FeatureProvider` class defines the way different algorithms process an image and a set of bounding boxes to feed it to the CNN. -It implements a `getFeature(image,boxes)` function, which computes the necessary transformations in the input data, a `postProcess()`, which takes the output of the network plus the original inputs and post-process them. This post-processing could be a bounding box regression step, for example. +It implements a `getFeature(image, boxes [,flip])` function, which computes the necessary transformations in the input data (the optional `flip` argument horizontally flips the image and the bounding box correspondingly), and a `postProcess()`, which takes the output of the network plus the original inputs and post-processes them. This post-processing could be a bounding box regression step, for example. Every Feature Provider constructor takes as input an `ImageTransformer` and a `max_batch_size` (used for evaluation). @@ -52,6 +52,8 @@ The constructor has the following arguments: * `use_cache` * `cache_dir` +SPP allows faster training/testing by caching the convolutional feature maps. Instead of an image `I`, you can provide `getFeature` with an image index `i` (from a `DataSetDetection` object), which will load the corresponding feature map from disk (if it has already been computed and `use_cache` is set to `true`). To easily cache all the features of a dataset on disk, use the method `:saveConvCache()`.
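A minimal sketch of this caching workflow (the constructor arguments shown are based on the option list above; the exact set of arguments accepted by `nnf.SPP` may differ, so treat this as an assumption rather than the definitive API):

```lua
-- 'ds' is assumed to be a dataset object (e.g. nnf.DataSetPascal),
-- set up as in the other examples
fp = nnf.SPP{
  dataset   = ds,
  use_cache = true,
  cache_dir = 'cachedir/conv5_cache/'
}
-- precompute and cache the convolutional feature maps of the whole dataset
fp:saveConvCache()
-- later calls with an image index (instead of an image) read from the cache
feats = fp:getFeature(1, ds:getROIBoxes(1))
```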
+ #### Fast-RCNN Similar to SPP, Fast-RCNN also crops the images in the feature space, but instead of keeping the convolutional layers fixed, it allows them to be trained together with the fully-connected layers. diff --git a/SPP.lua b/SPP.lua index 9f270e8..4456c2c 100644 --- a/SPP.lua +++ b/SPP.lua @@ -6,7 +6,7 @@ SPP._isFeatureProvider = true -- argcheck crashes with that many arguments, and using unordered -- doesn't seem practical --- [[ + local argcheck = paths.dofile('argcheck.lua')--require 'argcheck' local initcheck = argcheck{ pack=true, @@ -62,7 +62,7 @@ local initcheck = argcheck{ opt=true, help=""}, } ---]] + function SPP:__init(...) @@ -534,6 +534,33 @@ function SPP:cuda() return self:type('torch.CudaTensor') end +function SPP:saveConvCache() + assert(self.dataset, 'need to set a dataset to save the cache') + assert(self.use_cache, 'use_cache need to be true') + assert(self.cachedir, 'cachedir need to be set') + + local dataset = self.dataset + + print('Caching features for '..dataset.dataset_name..' ' + ..dataset.image_set) + local feat_cachedir = self.cachedir + for i=1,dataset:size() do + xlua.progress(i,dataset:size()) + local im_name = dataset.img_ids[i] + local cachefile = paths.concat(feat_cachedir,im_name) + if not paths.filep(cachefile..'.h5') then + local f = self:getConv5(i) + end + if not paths.filep(cachefile..'_flip.h5') then + local f = self:getConv5(i,true) + end + if i%50 == 0 then + collectgarbage() + collectgarbage() + end + end +end + function SPP:__tostring() local str = torch.type(self) str = str .. '\n Image scales: [' .. table.concat(self.scales,', ')..']' From b664985026c995d69426221470c58ae994e09adc Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sun, 15 Nov 2015 19:26:23 +0100 Subject: [PATCH 69/79] Remove deprecated file --- spp_compute_conv5_cache.lua | 40 ------------------------------------ 1 file changed, 40 deletions(-) delete mode 100644 spp_compute_conv5_cache.lua diff --git a/spp_compute_conv5_cache.lua b/spp_compute_conv5_cache.lua deleted file mode 100644 index 0754acf..0000000 --- a/spp_compute_conv5_cache.lua +++ /dev/null @@ -1,40 +0,0 @@ --------------------------------------------------------------------------------- --- Compute conv5 feature cache (for SPP) --------------------------------------------------------------------------------- -if opt.algo == 'SPP' then - print('Preparing conv5 features for '..ds_train.dataset_name..' ' - ..ds_train.image_set) - local feat_cachedir = feat_provider.cachedir - for i=1,ds_train:size() do - xlua.progress(i,ds_train:size()) - local im_name = ds_train.img_ids[i] - local cachefile = paths.concat(feat_cachedir,im_name) - if not paths.filep(cachefile..'.h5') then - local f = feat_provider:getConv5(i) - end - if not paths.filep(cachefile..'_flip.h5') then - local f = feat_provider:getConv5(i,true) - end - if i%50 == 0 then - collectgarbage() - collectgarbage() - end - end - - print('Preparing conv5 features for '..ds_test.dataset_name..'
' - ..ds_test.image_set) - local feat_cachedir = feat_provider_test.cachedir - for i=1,ds_test:size() do - xlua.progress(i,ds_test:size()) - local im_name = ds_test.img_ids[i] - local cachefile = paths.concat(feat_cachedir,im_name) - if not paths.filep(cachefile..'.h5') then - local f = feat_provider_test:getConv5(i) - end - if i%50 == 0 then - collectgarbage() - collectgarbage() - end - end -end - From 42d3ae20a06da92a91b4ee89ae29c9cf34afa44e Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Mon, 16 Nov 2015 16:09:36 +0100 Subject: [PATCH 70/79] Preliminary changes to SVMTrainer --- SVMTrainer.lua | 77 ++++++++++++++++++++++++++------------------------ 1 file changed, 40 insertions(+), 37 deletions(-) diff --git a/SVMTrainer.lua b/SVMTrainer.lua index 6f857b1..61f6597 100644 --- a/SVMTrainer.lua +++ b/SVMTrainer.lua @@ -1,7 +1,7 @@ local SVMTrainer = torch.class('nnf.SVMTrainer') function SVMTrainer:__init(module,feat_provider) - self.dataset = feat_provider.dataset + --self.dataset = dataset self.module = module self.feat_provider = feat_provider @@ -21,58 +21,54 @@ function SVMTrainer:__init(module,feat_provider) self.evict_thresh = -1.2 self.hard_thresh = -1.0001 - self.pos_feat_type = 'mixed' -- real, mixed, synthetic + self.pos_feat_type = 'real' -- real, mixed, synthetic self.synth_neg = true - self:getFeatureStats() + --self:getFeatureStats() end -function SVMTrainer:getFeatureStats(feat_provider,module) +function SVMTrainer:getFeatureStats(dataset,feat_provider,module) - if true then - self.mean_norm = 30.578503376687 + if false then + self.mean_norm = 19.848824140978--30.578503376687 return end local feat_provider = feat_provider or self.feat_provider local module = module or self.module - local dataset = feat_provider.dataset + local dataset = dataset local boxes_per_image = 200 local num_images = math.min(dataset:size(),200) local valid_idx = torch.randperm(dataset:size()) valid_idx = valid_idx[{{1,num_images}}] - - local fc5_feat = torch.FloatTensor() - local fc7_feat = torch.FloatTensor() local feat_cumsum = 0 local feat_n = 0 + local bboxes = torch.IntTensor(boxes_per_image,4) print('Getting feature stats') for i=1,num_images do xlua.progress(i,num_images) local img_idx = valid_idx[i] + local I = dataset:getImage(img_idx) local rec = dataset:attachProposals(img_idx) local num_bbox = math.min(boxes_per_image,rec:size()) - fc5_feat:resize(num_bbox,unpack(self.feat_dim)) - fc7_feat:resize(num_bbox,4096) - - local bbox_idx = torch.randperm(rec:size()) + local bbox_idx = torch.randperm(rec:size()):long() bbox_idx = bbox_idx[{{1,num_bbox}}] - for j=1,num_bbox do - local bbox_id = bbox_idx[j] - fc5_feat[j] = feat_provider:getFeature(img_idx,rec.boxes[bbox_id]) - end - fc7_feat:copy(module:forward(fc5_feat:cuda())) - feat_n = feat_n + num_bbox - feat_cumsum = feat_cumsum + fc7_feat:pow(2):sum(2):sqrt():sum() + bboxes:index(rec.boxes,1,bbox_idx) + + local feat = feat_provider:getFeature(I,bboxes) + local final_feat = feat_provider:compute(module, feat) + + feat_n = feat_n + num_bbox + feat_cumsum = feat_cumsum + final_feat:pow(2):sum(2):sqrt():sum() end self.mean_norm = feat_cumsum/feat_n end @@ -82,10 +78,10 @@ function SVMTrainer:scaleFeatures(feat) feat:mul(target_norm/self.mean_norm) end -function SVMTrainer:getPositiveFeatures(feat_provider,module) +function SVMTrainer:getPositiveFeatures(dataset,feat_provider,module) local feat_provider = feat_provider or self.feat_provider local module = module or self.module - local dataset = feat_provider.dataset + local dataset = dataset 
module:evaluate() local positive_data = {} for cl_idx,cl_name in pairs(dataset.classes) do @@ -98,6 +94,11 @@ function SVMTrainer:getPositiveFeatures(feat_provider,module) local not_done = torch.ByteTensor(dataset.num_classes):fill(1) for i=1,end_idx do xlua.progress(i,end_idx) + local I = dataset:getImage(i) + --local gt_boxes, gt_classes = dataset:getGTBoxes(i) + + + local rec = dataset:attachProposals(i) local overlap = rec.overlap_class local is_gt = rec.gt @@ -111,7 +112,10 @@ function SVMTrainer:getPositiveFeatures(feat_provider,module) for j=1,rec:size() do if overlap[j][cl_idx]==1 and is_gt[j]==1 then count = count + 1 - fc5_feat[count] = feat_provider:getFeature(i,rec.boxes[j]) + local fff = feat_provider:getFeature(I,rec.boxes[j])[1] + --print(fff:size()) + --print(fc5_feat:size()) + fc5_feat[count] = fff end end if num_pos > 0 then @@ -133,15 +137,16 @@ function SVMTrainer:getPositiveFeatures(feat_provider,module) return positive_data end -function SVMTrainer:sampleNegativeFeatures(ind,feat_provider,module) +function SVMTrainer:sampleNegativeFeatures(ind,dataset,feat_provider,module) local feat_provider = feat_provider or self.feat_provider - local dataset = feat_provider.dataset + local dataset = dataset local module = module or self.module module:evaluate() collectgarbage() local first_time = self.first_time + local I = dataset:getImage(ind) local rec = dataset:attachProposals(ind) local overlap = rec.overlap_class @@ -154,11 +159,9 @@ collectgarbage() caches[cl_name] = {X_neg = {},num_added = 0} end - fc5_feat:resize(rec:size(),unpack(self.feat_dim)) - for j=1,rec:size() do - fc5_feat[j] = feat_provider:getFeature(ind,rec.boxes[j]) - end - fc7_feat:resize(rec:size(),4096):copy(module:forward(fc5_feat:cuda())) + local feat = feat_provider:getFeature(I,rec.boxes) + local fc7_feat = feat_provider:compute(module, feat) + self:scaleFeatures(fc7_feat) if first_time then @@ -264,16 +267,16 @@ function SVMTrainer:addPositiveFeatures(feat_provider,module) end -function SVMTrainer:train() - local dataset = self.dataset +function SVMTrainer:train(dataset) + --local dataset = self.dataset - print('Experiment name: '..self.expname) + --print('Experiment name: '..self.expname) self.W = torch.Tensor(dataset.num_classes,4096) self.B = torch.Tensor(dataset.num_classes) --self:selectPositiveFeatures() - self:addPositiveFeatures() + --self:addPositiveFeatures() local caches = {} for cl_idx,cl_name in pairs(dataset.classes) do @@ -313,7 +316,7 @@ function SVMTrainer:train() X = self:sampleNegativeFeatures(i-num_synth) end else - X = self:sampleNegativeFeatures(i) + X = self:sampleNegativeFeatures(i,dataset) end for cl_idx,cl_name in pairs(dataset.classes) do @@ -396,7 +399,7 @@ function SVMTrainer:train() end first_time = false end - torch.save('/home/francisco/work/projects/cross_domain/cachedir/svm_models/svm_model,'..self.expname..'.t7',{W=self.W,B=self.B}) + --torch.save('/home/francisco/work/projects/cross_domain/cachedir/svm_models/svm_model,'..self.expname..'.t7',{W=self.W,B=self.B}) return caches--X_all end From 412ff8e8873426e6b0d78be140814c5aa11e9d20 Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Mon, 16 Nov 2015 16:10:02 +0100 Subject: [PATCH 71/79] Improve visualization tool --- tests/test_visualization2.lua | 6 +++++- visualize_detections.lua | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/test_visualization2.lua b/tests/test_visualization2.lua index b289e82..415f86a 100644 --- a/tests/test_visualization2.lua +++ b/tests/test_visualization2.lua @@ -33,6 
+33,10 @@ boxes = ds:getROIBoxes(im_idx) scores,bb = detect:detect(I,boxes) -visualize_detections(I,boxes,scores,0.5,ds.classes) +w = visualize_detections(I,boxes,scores,0.5,ds.classes) +Im = w:image() +II = Im:toFloatTensor() + +image.save('example_frcnn.jpg',II) diff --git a/visualize_detections.lua b/visualize_detections.lua index 63924eb..b9c7406 100644 --- a/visualize_detections.lua +++ b/visualize_detections.lua @@ -55,4 +55,5 @@ function visualize_detections(im,boxes,scores,thresh,cl_names) w:setcolor("red") w:setlinewidth(2) w:stroke() + return w end From d5578652801fc7da764ce28cca1a783d3ed82e4b Mon Sep 17 00:00:00 2001 From: fsuzanomassa Date: Mon, 16 Nov 2015 18:54:20 +0100 Subject: [PATCH 72/79] Improve example in README --- README.md | 34 ++++++++++++---- examples/example_frcnn_lena.jpg | Bin 0 -> 43938 bytes models/frcnn_alexnet.lua | 66 ++++++++++++++++++++++++++++++++ visualize_detections.lua | 3 ++ 4 files changed, 96 insertions(+), 7 deletions(-) create mode 100644 examples/example_frcnn_lena.jpg create mode 100644 models/frcnn_alexnet.lua diff --git a/README.md b/README.md index 4ddeb20..53405c8 100644 --- a/README.md +++ b/README.md @@ -118,16 +118,29 @@ The constructor take the following optional arguments: * `imgs_per_batch` ### Examples -Here we show a simple example demonstrating how to perform object detection given an image and a set of bounding boxes. -Run it using `qlua` for the visualization part. +Here we show a simple example demonstrating how to perform object detection given an image and a set of bounding boxes. +Run it using `qlua` for the visualization part. A pre-trained model for Fast-RCNN can be found [here](https://drive.google.com/file/d/0B-TTdm1WNtyba3I4Vm1hbFRSS2c/view?usp=sharing). ```lua require 'nnf' require 'image' +require 'cudnn' +require 'inn' require 'nn' -model = torch.load('model.t7') +-- load pre-trained Fast-RCNN model +params = torch.load('cachedir/frcnn_alexnet.t7') +loadModel = dofile 'models/frcnn_alexnet.lua' +model = loadModel(params) + +model:add(nn.SoftMax()) + +model:evaluate() +model:cuda() + +-- Load an image I = image.lena() -- generate some random bounding boxes +torch.manualSeed(500) -- fix seed for reproducibility bboxes = torch.Tensor(100,4) bboxes:select(2,1):random(1,I:size(3)/2) bboxes:select(2,2):random(1,I:size(2)/2) @@ -137,8 +150,7 @@ bboxes:select(2,4):random(I:size(2)/2+1,I:size(2)) image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, raw_scale = 255, swap = {3,2,1}} -feat_provider = nnf.RCNN{crop_size=227,image_transformer=image_transformer, - num_threads=6} +feat_provider = nnf.FRCNN{image_transformer=image_transformer} detector = nnf.ImageDetect(model, feat_provider) scores, bboxes = detector:detect(I, bboxes) @@ -146,9 +158,18 @@ scores, bboxes = detector:detect(I, bboxes) -- visualization dofile 'visualize_detections.lua' threshold = 0.5 -visualize_detections(I,bboxes,scores,threshold) +-- classes from Pascal used for training the model +cls = {'aeroplane','bicycle','bird','boat','bottle','bus','car', + 'cat','chair','cow','diningtable','dog','horse','motorbike', + 'person','pottedplant','sheep','sofa','train','tvmonitor'} + +w = visualize_detections(I,bboxes,scores,threshold,cls) ``` +This outputs the following + +![Lena](examples/example_frcnn_lena.jpg) + For an illustration on how to use this code to train a detector, or to evaluate it on Pascal, see the [examples](http://github.com/fmassa/object-detection.torch/tree/master/examples). 
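In practice you would use real bounding box proposals instead of random ones. A short sketch in the spirit of `tests/test_visualization2.lua` (assuming `ds` is a `nnf.DataSetPascal` with precomputed proposals, and that `detector`, `cls` and `threshold` are defined as in the example above):

```lua
local im_idx = 100                    -- hypothetical image index
local I = ds:getImage(im_idx)
local boxes = ds:getROIBoxes(im_idx)  -- selective search proposals
local scores, bb = detector:detect(I, boxes)
-- render and save the detections (run with qlua)
local w = visualize_detections(I, boxes, scores, threshold, cls)
image.save('example_frcnn.jpg', w:image():toFloatTensor())
```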
@@ -210,4 +231,3 @@ git clone https://github.com/fmassa/object-detection.torch.git The default is to consider that the dataset is present in `datasets/VOCdevkit/VOC2007/`. The default location of bounding boxes `.mat` files (in RCNN format) is supposed to be in `data/selective_search_data/`. - diff --git a/examples/example_frcnn_lena.jpg b/examples/example_frcnn_lena.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e1919fa96d5fe21e191a9149540cbdf1e85be13e GIT binary patch literal 43938 [binary JPEG data omitted]
za}zMo#$ZqOyy#JTANzuDIS|)!U{MJh34kysAm`SnP6q%ptW$!(WGj ztIY)JtMvHk?LV^65S`sNj`<~9pPqgUTKYygs`KqvU2b`U^<^SXme4-x>}cwYff+{L z!x3F4Qco?FyMcP{P_k83O#!)lnDOD|__rMIqP5A;F2Y32aPa! z&cvD9;%ZDc2zMnX#kmjK%y3m=)ZLGM+I-cSrEzfkoGr&gm=N%-cKo2|E&bS9C9nPm zwbs&FkfUDM3}7PG-71F7!}#Q^IFX^-ddtH7|3Avu_B-g_mAO3Gy$s7Uw@1?CUnhX? zfC)zV& z9-44XqK>*d7jIlz#^h&LQt6P5pP64(-B4F0FN!SdglMSRg9FX`e6JzpPyXx zDPLcpJ*^k0m03zlSJgGf)sug&vnwqW2R`sCE;O`xw=2!o&RD${2IM$KCiJxwKCl~1 zrm-Ts81X@IcQ((6acap!t8|><*eyvmO-a7&wbr}CglpyB#lms@a@BSarQnmO)!-e7Y4y3AjQwslZ zxH$~8RH0TrNUOU0`p-|}WSM6w-evB+%0j5wQRzlGg<_u5hi-q*<96o$(#sio)%0I3Q@aJKOV`a? zx3-s>SvK?DUSHJGnZl80LE8&mEuTAId``(~e@~VC#AkHhIHLc{x@xa*{zZ5aX&_eh zXlm27PwaCIePM>}#i!LFsfdZ@WSJ9D^VInPT%+|Co0iCU&3#}2d@mlmxHskA^~`Xf zk4Cgn$sdxTXBekkGP2Pw$T(0Y&;CM_!unU>q|}%o`QE(DXdciUL9H^XQf0pPMG~em zWGEpul=)$R{DflGFrm&C4GDSc6fHs-V0&p$7Od?q1cmpDrjABdRek!}H#l6F7*I#N z6V}fFsx8w_CTk`;E*}pS_u)k1OFqKl&N$~vDE04@l1W@n1=gwTgk`0A$X;JgKW7-w z_kT|&hTvn#dU)CA)D$y2O;t?Rm&SL0sZB$ zgFQ`PQ%J5!Xl48Zf;xsAlhY*HW86Y~$lxibMOWw zi=Pj9%vmp7J7k{behyawPB?+?%m9jigqYUJ$|QIyT#cX-?5^TMWQI!=X`r%OS?);@ z;qXMKyuteeB^iMm4Xr;#RP`Ou5^G+i6jM0@8yPl4z<78(WmJolyG%92oc|Zx{1KvK zgv+}7Mnii^$lproL%d$(umt;~Ev1*9OjazpHzWK46wpm|+Hu`b_(Uu9O5?=gOlfnY zN88si#)J84Z(qXEp1&8JBblo*@&7CHz4%OYNBDBGhc=>xrc`~B4%be+tpPqx*&0z3l1ET$ zdd^FfA_PK8xOr)h0dYvM6absT_xK62;i#mTTmL4~kNr2uhszssE+AhRj04d0wA?uD$K0*D{KoY`=GS+di&ZJF>m4Qw ztbH@e`5oWmQ6<(L+V4l&DJko(KYun7e&u)>TwYdG8}<}%%&y*Tp$rTR^e+1g2L>*@ z53cF+2;#}E@0@1QkGoe&6|Aim3df7isn}UzNgeT|X2Y&=WZF{Q+y2|mZ|w^XwA?@p z1~|iT-(z7{4j(%T2KAh%K4sCwihdj3@4>QHp0A_O8*GkG7FS7DmWuOoKbSPZ=sX)M5maw;Ib))W*1wS3)i6&M z{Ui?|UwWav+OlI-)adX;Pbw8Ey)Sq^{VCAbCfL0edHl4ZG@mH1TB<%bznOKBv}CzT z4f0aq4Z~Z{fiju(kEb$Gca@VOQ2y61ExzH{bnhrZNErIrTxAj@{USYxBzTDd(kEvT z58qtOtFeJ)tnb`r?XIDW$>WuQgpGJ;;X_No*`4T{U8!)jQ$oE(I zN!qHE5z|@YrcbrVwvyF+F<2_#bd*P~n;n^IZlJ-x5dJoK*spjBZ~Q)G$f8a&;3bqA zg01*vn%O>RsNnIu_SnG#{m<@CYz{lPW77S&VS2?zvNWQx+A>jgEPJdis9QEsS4o^GNsZZf$OlcvR-jR7M z@|OJ<65l7NhCC55fYp!hKNf(Wti`#rSy92qv95XOxwo(O*BLAO6o_)J|ELa{GV!S$ zD$B#>ss%4RC+fb~Ef7mldm1(0uMm!y!A1@w^Ma^cpq_I@M%}ueBRWdY6sd`J9Zip3 z7IG+35<|Dq@{0M^4QtCQb$Bs7l2Bwz+(74{cWT)j6K-qAJoP`KPnL(LiZx?IVF&s( zFTT8J!K~ODHa&)<<0Q7sCvgW>tI$CEhJ%OOf6p3RX)5pBwU?B{-K30NT=P^U*VXpK zx@w;CJ5Gv-YlFc*D}=YQwyv$o1(n!XUuv|<KJJhC?e=q$$<_(8NnhM&5dTdl#WO@#nQ(PSM;_^q7DjGT}J0iO|) zPdSw16E5*Hu=r&hy+WTmDfzX6S1@Y?tw^M9N?ogaqDX?T)q|JIH_n<1e29{~ zd6{8Dt|aBU9kc}-1&;>I{9hBFCVW2he@(ox=I!^-yNDLROx7ETJxyMBkNX_95ssq) zfeyuBchaCh-;#%be>A4X5gJnNHA~1JR5?^BaeVNFuh~uE`8R^thAJ0vnS(Hc<*-T7 ziQn(UvP=hOYJguB#RdArC730ujPv{*x)|A)fy)lXc3c<(mS`kFOS3Vl)-Jmj3cah1 zy@tZeFfP!>xw;Fd8j=`CvI5Nc(@v>hrrkL8D{XgYGPCb4A7QcLfTSnvxXH=mQWcek z74C1}s+CK<|ISQ!(@J=Vn-}1&?TZuiG==I+Ad6=uLsNb;DIrC89ZAVrGpu!7z7{X<3gR_1 zWM4*oMyk1nmHhr_6iNe0c$u=7OfkzU02I(0H#}5%7jNdW7~dRaV`A?)4C97g>6)m& zFw3w5YLZUg%&_`ibN>t@kO+|T57oPfcX{|hM=GP#b1d=5!`fk?mp)|~ z*Ebqwjqm36$$S7_p)%(LgAU&Ejo{#>{+)RJ%ZRM#NRwRpJwv}y<-fIa1WRG*WL%@2 z9~Rvmef~B^Ue{c~fwyz>=woJU57jHeI3T-zn#NGh#q&kwxz4KJ-L?5+F1>U_&vnV+DoGs7dxCv5&`qG*{xyXGgeBIl)Q0=8LSy|2~422mnqKEr7+_JN`I#i4kY5w7F0i(4i5&^z88ATrBT< z29aSwo`@fba)#3l!WzvU8_g^4yw=1F@7lHir9%I8s;;N&894YW`)hH+*RWJ|b&oIV zhW#ZrDjyEtOUd!(sQmrM@RiFzk!QhJh?DZ>THf1oZ&u=fT2F4tFIXusDxvka8sQib+GyZ<#I}>@I*-umj za`t14CuQD$NSC$*8xC)87sMz48^8cV*G%Xh>P5WF?@umn>QjTc>1{(%_KPJ6?SYS+ z!AJ_=N?V|Fsh##uTPj14IgbF8n6Z1$sJduVrXOu0b_ONym0@4xVvgqS5i_+0c7?7kVch z9QWq4z5KgPRz2?E6)iE_+$E(Y-}5BY(=2~tuaTz0>2sqq%9(f-ehS`M5*Q0UPjH#K zN>h16&@ACMZXzk!Bm)oKF1*Vek24r>qDZ0Zl=Zuu2@2@Fma*gio5;)Ka80oPC%Px} zbB02^H`)u2&&Bu(2nf@1Lcg1M%l-=*^Jc~Mk?mespCf~d55^|sY1*1?I5{K~MB>e? 
zApfif7(n=lGY&Wjq_hJ=RE4ylTM|InN$~(6LhgC#$V+Aqc-&q!+vC%B0*Erh zk($808)LbhcHogmkTZV`(6wc?R1lXYq|adUh^YbUNNTvsIC1gPz7=&RveV;d<33*%C(4ARO!O36A*%BgJR&l9Se&=TkkHwQXo+zXwxo=KwS<8qLC&G!K6Db>=UPY8K&)&z|QV?dSwBmKAS$06CE z$DINHTP%aZ3BXwZ=axz{DhgS3Lb%+65fNNVGa<`P!F5vktGApG)ROn%GwX~|dbd!Liu)b=nu z)Apb_BBWo8nqzC4e;6~)9w>UACBpHg_X5rA-5N!zxpVxL+da>-cB1amA-G#&M;e-* zH%Hyc*f0Tv3KL4vSpmU%-AA?-;@oa_J2rpo3YGaaS zNcDM;kyn*j?XY_~Ey~<52@YkKo$5+fB?*pnKlJlJ@ocw6TgLv|)Fr=HJ;KI|%!W65dv~Q;Y*@RByruC8z{`X- zg?cj258W|xrWBQbWXB`Pw}%ZrJrp$MUQ!G(v>3%0B|`$mAChzv!6a%#gTST>1OE|8 zpUFqtcqx!RzM!;FS9yz zy1GlhPsDz?hx3qQxe?SYCAmX!Fk(CM1(BkxkgTAyu+mnobaPJ`iTPL1O+WB%U%gBz z7(ch&&-p7Gp*k&R$vaqIuM{KN*s(YDV@0D*B?ovjX>#N^3Hn&nY>y1yVe7ZvvFO;# zZO$+@eHpy&SMkWgyg;E@E^3phrMf_ekBU<%+feE69;um$OoMCJ$EfEo+h9K5t}^jjy%ux8%piRAuF2<7V?%DfzJT_s?=KgEHR<>xi zz>v#I!BORb0pKIU_zg}gB3vmpm4M5{Mv~+#uNF&-zT#hC$=6mDzm^~;pA=fnF?t>Q z-5P~K>Q@y!#uf;8D1zyWZ6mo{g*=>-3)@@&Xo@9#;tY$w(tR{`QtUwt-VSsSnee~F zaulnLwYz%e{UKEP#$%chd3_aci8CZ{W0sPu)uwDmaw9Jeg)3_QaS;xL0DuwqrmCSyf;AxyFbT$8v7v*nHs|;K+?~9|d?&aAGrt<5Sr87@2uv!jGG+lA{BJVwzR?&5zci6ob+M^m*+YrY%xYF;DZvJgzAcN zC#L5(B=i4M@XSbvAT+9|>AxzB{Gel(Qj0pawbfddRW3BH4wpk7MKPwZtgT_&IMVG% z(x2!C<)@BrxD;!BJ=imjF_^$AiOo48$2e3*;kWdYl+KkBPE&A0)ju=%+O!fqhP1z# z>0fPvoyvap&+cjg@+-xKZz^oo=@sFTQj~l!!*vEeG7 zZ&UAzV2@#UK;KvJEacUxLRlxZ3;~bF2pBSKRysp-osgtYF&RS9@$EYwZ8@S_k=&ND z$K7~>feaVc(XHo$7CucZqn09 zKrgKwP%P7 z{`qv+l4;`G*#)|ov%(P$q7z1bF^6w)zkYQk@BCr|$QhAoBRJ&iQ@XKj<=(j`VqWAAceP8MdX{qDUXE&}% zUEV}MmT8n}%MU%ZQx*eAnT4E%RA zXL6w7D+7Y@3*9F3s88z3xj{-b1KNr?pAnPLmEnfZ#^pUA2u3rV0gWX+ILi{*E(wl` z6LTw{>I3Q@D*o_(k+og4I{g~*o16?6#%U_nr;9AOMf5e@D-UQJHvK)^T*#q-PT)ug zLLF3|$*>0Wp*vko3T`9};I&qjzf^DbHxr&cbmN5epoS#=t7!gqFT^pes+Qrj{Ou#j z%8)yGW5-X80SU9rtwqoy*^{vV#WezEI7TFsyF*bjV$)eZHhNz;0=dmCW}OT{+~*Mo zu?IKa6C*OuIW6G;J@$A_DD5TZCPhlvrQ&>9btZQ-CK~p3+2J!M=rjgHdY!%y${*Ec z?-IBs-3)CNRT#D~mUuQRrOsjn#tS9jZ3F3xx{y>mSd~jRgeI6LXPR!;6#|djwC~pq zs?43bh;6|^D+*emnWLJ)$}RXEY*FlBSp0fW1w~67OJ48Hj!{Vztg$hP$6iyMI%a;y zqG5WKa1*Bq-d==6vtGVY5hne$y z_ceM!9oaZo_@NbSDD&5JvShM-e7#(B$gUhI;uZv z5AzozYW2r*``BMLQa)Kn32=Q}O2rGUG$-3bNa+J= zrBCXWBi;*mjt%#8@Se$5^ibAyfmaVi*-uv<3^jhO0MPc26+fPQeczkQ&C!`V(!C3P z+dqwjiyZSJ7ryc)9IYKu`x9!S>GHez8UkGtf{rku3Yxzw^=_+7f7|6EQBpaF#|Vbt z57-pVpS_=z+{s`Q=?tqN#vM&d=~A{S+vQ_p!DNiE52*?f@c;F*wWFN>pQX89o5oqO2+Q3r13?1 zHtbGnX;9xHCCQY-`T3FWzSPWZY!1iUo$-XJ#UGtMZM|}nt zOvc~NPvgNH?G~hu+ozWH?U-zyol6Oakl%JjSCfQ@?Tj!G4O!Ha&kVV- z4`2;;E{Y_8AIVzKPF=Y@q-{L97_ufA?jKD!AEv*r6bq!Sce?0(IOe|JoL9V*T?2Y% zHN|@fDcE>_xfa?bLM^5GaJA^lEql}F#0yXWIs@vG>9pleMM1UxZ;=bINu-WAHK}r+ zT*<{nH-1OM5rW(H)Lb*REjk$sZp`Ch?sB+xno8*IhWd^2NcuvKC@>+W^2&@yMVG*7 zMR;e4=a%M}r6&r7zbAm_{39ys`1=7na2078TcCwA+1n?jS+{g_`i)f5%wfSVoqJfp zpB`aS_Dp~IAQQa(7$rB44u1QuxiVC#7*j3N-Z+;WzL!>5co?-Do)ZkaoZ;aji)RhU z+~SDRGERLVwT$1%DiCwmBmip?EOKL66FZ9?7R07F6IE$lHa+J)k$!$OnG#{r3ehUB zd{XC{8dEJBN_*1l({0L`YG{R?i z^T-C8wUH7Rlg-$eDg|baPi-f11dLNbxK?%vJSD|Ei*_T_6LC}7Je6-$*B*Zy3xs#E zh4x&|P*x9#Gm6M<(l;anh5XL&=pL13f!Q+F-kG^pGvn*4mQ@Lt1GCDO2vB}rDrk^Fo(Q@$0abDlZ_eTSTXvOwGE9GH3P=v>y|?1DMf&980l&e2^uYAs;aw#X;Kejg}2 zXKE^sSBd;sSP@H1Ne!_nj)^z(C$T4Z{OZ|n!X9>P~b%`Ul?=g(mcDD>6 zTeQ3*pdkShS0Ye8Je3*TUy;?LbgDqryEaPHN4Caw_!8XJt#>|sUsd*)D2_oF?KFm=WgU*_51hbI(4tddS4QqhgBF73gY3&h9DBk^aqbIIhaDo0Yxr z!TAmgNg76N)OdQjQ7JO{t$a+2?<)35nIV&MVC;R9GSQFCN!Hw*{obGf8diw*Q_&5#sCsCVz<9Y84Q zOmtA}`{drbfM(NCk@<7Hc&_W)2(2C5C5?&&o0riu)^M_b=Hl6gGg=MqmEUH(Bjc;h zs}uQkjpB0&9bepEB=H9kOXUrA7($_Uza|(+rT(ZO#xx(5>V9ZvoQQ$6|7I}N?G7m? 
zO;aE*Qr;ekC!RG-kNu$Ua+3Xd@~<^WAmIgbjA!Ct8cuzZH)d|1cRX^)8w}MHB+&Y% z&G|d3n@V3Q6R16BvgS5*@vgf~lAO{8!)JnOk3EJ}Y&=w?QjFfyW!f=r#xyx)MU1I- zl!Rb@X6(mo2N=oEuEzZQ^te?Q3?2Yt@vYDFFHsa##t#^qm#y3bvD5AM^H-U75S36< z%KmSqB}yw!KR)~<&eL?NKnY$Le3n?VJ>95vFy-gEXqBqnD84^NTFE&QkNTb}pevxH zrK77O)GU=_j~b>J4f&C8DYy0jLio)wB)9qx`!rs0ZeT)+?JPzgk(^aa#0;TIB>>nu zxA9aeQd!q2?ApR}f{#;>xlOl$O3sQ91yV(*6*o26&y64kC8v4Z9x%?9HD(ZB8&{}m z=|`?JZ2qovIr<7!2Blb(#+bRR3E4(MoyQ8kMLR4qS+_uxyRR$D#~Z2oAD{xMj^CJl zl4CD^0xLP}o32X;W95xgl#Xt(xr!O-b#nj06hapY~R{NCYtnX<=6IkK$eQ7w+!-}RyF zz{@zk0*&7#0e2UyzIFG{{iGes@0LN8M!f$!bpCGYVh#Di0G`_RP{wg09>Rmgp_3m<#=B;#9yj)(RhA#IS8D(=B! z@j@oq+>by&qLVc&Wqc*|oIQYLaVzq4?5*iw-xt=5uq4j|l{6`@eIk)kHW=BCGUi?v zOKU}~IQJ3@o6O1YK>am$IZ?|4BpCyX$8s3H{0JT@1Rqg7&zv#4St3-+OrxX{7|Wn- z34zvCRykeTBqiWkuL6ez(P6x&DbV7csYNZyUHK9~@Ux+@$zYT>Xv6ghfsWL- z2>P_AmsDG@%u;P^dh!W+d&m$EV_+R^E)7C(Un*`I@3YLkD@{BgB4Z=8K$X17f6Oo5 zF|?zM^KiO7_eM8klYQ1u8RhkNi}aMJHu)LVgulbXuJ46ST^$}+@Ogu?H+e_s9&cF# z)^NPHHMjJ-*54Pk`JW26eq;bnzgvg}SZe{}2|MeYxI+B?HI=*fj!0^+v#yzVyB%JbHW6#EMIY3WTpT@>ifH-`R> zqRYQfIzw=qPl)Q*wD$Lw4iAUbnPQp{d|DOx_wts>^?a8~b7% zT3Q!YhqF|~g=mwt?10#VSkV?Q7vU==EsOL=L^D*>s{awypLQ2W$>QI5Cvek=~{Y((G`3tb3O|Sd>y#K#rNK_a8CY z_RB{szt-I~gM{ZQM_j9ljoqF4_PSdxVV&5*0Zy{of(OHA4et)aB~L(0#o^QIDU_sI z@^N(!Qf7eo>JG{&=m2LK<4Ts1jYplte9(RSjw7_RZfQb?b5V>+<7Dc$RGY4#jU7Ep zS)`L->#~?oP{sO0g&@=ed{+Gg#t8X+CXwCO;~?eRYdhLSh0Cuzs}i@W-7%^Ex$P^*8nVu?mN%_F-gjSv!SHJSZP--}G13 z`Z&PO`P9F7#XSV^o&0#aj;~tu_JY~Ic$ld$Ic*DzDHXwai;#~Q4FCpC4HFZ2{W#v_ zCWC6tuusZ?#+TsIF^)o~USx-&1I|v9Z`+BFFRElL@#Bud>Hn%iqeEVV0X@5AR`ofX zj~la7!r=n$a10LtBtW0+^L`O+o6!C;QtVerAQh`>E9GC~ywb)uE#&!lGBSEe_fX;) zj@DN`zh|7;BW*m#!*(>b13VK)KjZ1JXHdUut}kym^YJ2~x4AFbQf_unHj;1*t4n3c zjW`x10E>HhV#r|Sq96zH*dsn;DKvNu#(LuvbtsH(zUq+4+Yreig5%3VUp;pKq7;-z zi3TuD8jpK|CUAKH&0ktE?UEy0clrndaU)$C7+;a5Oaf|rr@*)qpLJYx0BW{Xdm1)V z>WN6@l1bXpWPQeP5Z8K@ZSN&`!7mr~eDyY+@?G#xLZ6uC$~^v`ZW%M;yRto@UJ z@*=Yi-i0eK-(LdTn7w53csBI)Ryc9p;-n$!kMBi2(B-BnZ|HwO8-!rxmG?x-ZGl7r z96?qMb7Dxo)$8iQL8IKZuwl=y^sDG)U+CjdJQ?l7Gp-d{>}S zx88dC4;)QRf4^xjN|S$;-CS|XSoU;gS1iwonmsndUtLJ}mNXAuChuO`!%n>V;jw0X z>9pCUS9#gz!gBYNPvfP!S_oWI(OEY>8D9CC6)d6BD)-RfJ?@>Hfb4ZL0`4;4yo+&mVnJWDz`>il)@}!*a znTo4&?42|hTt3?D?R2(bJI|Br07ajlp*N>Hd8Mk;htYL2zMa1h2R`b%W~L|hh^7}g z)|#hve!IbDy9y2aZC!(QOqQ;8O${w!HZuPbw@UR}EM`(3Hof~(nYVR5!%o#~#0PqK zr2MmXU&9RBC?Oi8sTRHR?C4D{GSH?Bl=0hyf(n9mHdd-!*eHjz^L5JX6!7-j_{JtK zLN6z%9M}SZdZjKC6cnR>Q#91$1H4%9%IVI@s)bgrYsYckvG;tKm=jfXqGQ9rHUdf) zm%7He&B(N^9ILbZAU)Af8w{SK6LQrMqlJ9JmIXsP*AGe5p6yY+PA2^XJlC{R;cLYo z4y=E572OGo@+Jym`z_&^V(w<5umQA>4~Ln2s3Q1aPt~&vK!Pv(oHD?O=};{UIFgY} zMiXN$F10yHmb1c-BuwwrS~aWoUrCqJ@Cd2x4#hdXE$27f^tz)f*WE9HD}L#k6NZOY z=9y*xN7Ob>ye+T4{i;a)0>(MrMp;SWPsYY zGJfi-V_Umwd%>umIe}_hQpg|f3K8+us&YJ2+M@yRRwzF49l{o5|GWoz@GDVdn^3*Yb&DwigdV~c&JWVm8)lSJhc{;0fASFLPME z;wRS_eKjW?p2OE!o^&5~B`M*u{677{cNHZWwv`;BT3#wUBTinpfy;9X=4R??e1;hr z@(6A$cU|dGP)Ujq?KATAHr*}in7`;89(sCU@oN8nn4ll8c96I_4ym%}ak=dvXH=Rn zUpMWY-g$fYQiJ_xv)@hEX|9=~W(t&ZMVf27y2h7)QDFF{{bzT7A|u*K z!nQP;b!Qm2AGp3hEy_JxfE*`aNQ)!Zn$q58^ZOtI3_lrqcI=Wz_K5fc0K?`4$dJ6n z{j5iS1HBDcY@@pH5~5*l&j>E#qK`!!;C`9WHVr2V-jgA_kJs4oK6V3EZ-k0y1AU4u z^jDaRyRvPj)*w7w;@WB6Et&R3L%4E%3?R9MunTJ2(dBPT7N6WBrv;Dh08-|VzWUtt zy3WjHsc#g{ZejfDz7dFwxY&gx2@2{Sd z;LKWj@spir{}cGA=CpI!zy1cGBm53oy*)x22RG-}d92+?1qTMrf)?a8_F|v<8mBvS~$W zJRos2tBU?CQ!n>lLKD1lg2?s-mU`J5nWxufgIQl$6lz#r(^U82g+L&28NHPsC4?~!sHbxb4xx(DWy1p>9hvZZO*o#M&7 z*k;V0-t`6s9Vr6{pvk0vVe@Ei6b1cnx>-9g8|i(_#Mht9V3&`_U43x;rQRpjkkKsa zbzXNNzm?ClNdbu2s)ys#9g4|nEHpI}=|Ern)*xl0CrGj|m<$5e&XysnJ;n`IgxeIJ!G=*fW&P=1^rv(RI)^CX#^&iRWBTmX0 
zeEOun6g8U5l2wDs$I8%j3E5MTfUMwg1;{%_0s%OQD&8TkI?~kFw|u;=2jE@GsCk{(eRk_04I!9pPj?B-&eQ`&9nPTN@IK-J6N^M}$v28$`)(M&oyr{kVJp18mCf2qP*}%krimo3ME#{qz zJv42BPYWEp-Q%C!QCyqW%pLv*? zXj1QoqM{q_ei|sx?zgZ`Fm{cqZ=wF2-sU&#!QGsRk~Y6Ot-j|E3%uB$q#fcMgfR;z zS}t4}io#ylw=wGecH#8Tttt zSb?nnv=(s&U_Q=5$%O=joR5>zMda+w$sUz)=gjCFWpA>R5bjW%ID0!>oRJm6 z_4|Ck-{0@?`2BzHb)VOJz248^n(S-B8|MrsjHZrp^Id{OGzmJ3ZAo==M+Aa|Xwqm) z)0=+2W-t2bHfyqF8h^@F#s_OGY#k6>4U%PjEmCh!sOw(Ox{{VGDa#`&bY&|*vqiK) zzS2!o4JREak?h-F1Do0MI-BPG*7UJGd9VFW+Zq=Se)CSswFI6`)g0qAor<4>v{d>2 zLpF)3mY}7x374WhTR=N$6)p12v%Vk0hG&4v7WpRgYKr{I&_M<9W>~4RGBH0U@aB{i zylC!cQR!fClg+bG#q6 zV0+ij;_LS9Z)|d;K6i-lfB?ENH`c|}Sv8iM*+6P>0!0A)2f)|o{O9;!+gcgevTw0- zd$~S#{aVN2FV)VPPopu0JSqI_LL)~-5=_imlO1mJH_sg8>F>E>zU;f}63NPHWF|HQ zrg+DVCowI#?JsInWtcMdJLMc;>Z3U5Co4#9d{d-%gHgdY_OQSuJ09{%gQKt_#K@_)`N9Y6e>&-|7^Nl3TdLGSx_nmT{dzhTBQAlbz?-0DFK8>C@0rDe zqw)BdHQSP%{GOSH`d~p9j5bbj|0R`wl>b5@ptpSr1G3tN>X2SIJuD_u=P_cROlS(5>?ckE!yk>+4$_a5WbnPT(>^}?I z=zsh?BsoIu938_f7Gs}1C{rNRu||tt!#;J8pLAMEa;h&8Jx*cGD;U;={Wz4xiz$V< z!bQG}GEWhHSJHPspD|?bGk$)>Yr~GwzxYbp{J~MNIWc{<_@R7lgoO5J@?|$T_8Qwf zv05caYTLNiFDf6$$}bRt_4=CmVMGAgAh1^xX7)`(zlI zM)}zb>wB}w$_}d$|3LkP;oL#T2Ur%VTgnt(r3-n2(UhKccW7tAubQW+i#F7+!6DGm zsEMf9WB+z(e6!;F1xT;zwWu$#NL;b>unx~_-H0V8C*0Y5`On8L2}XtMs$9wo86!D% zhwLPhedQbP?3aFmxl9y7Cv^&z9F6cfN=ZZLhNlQ(kvE$6Ld4*hBbcmQu~ zlOuIk&6t9aF?3YCohdHkO?PId8ZO5sY#FG|eU)E21~_DO6vKWLeU_$YeM`TgH`ddp z8Pa*5fzf$xmHmCn-bdB5z*X5ehJu8FjoH%e+$Hi66MYmB>Z9+w{_?>b(0DrL9Z)P{ zB6X>DWRy~OZ?dyV_;#D@F3#V?MkuQAE`utThI@?MJ3PgX2@&)CYi5D{o{EZMB-&aS zFTdr2P=|7p1DRiy9IliWiuM$;VeVgN!BDawFpal~@EgE+Yy%!d|(g ze?N8vxC5E!Z>O*eT5^gRr)vBZJV;m_e^)imCh&br+zAtDgOrg0Q`9f!!tTsbOctxB zZ`#Dz-thd%+CAqZ@4I+VDcVGPGhZ6OTRGg7T((U|6dQbRO6tYl1*IwHK_9iiH7=*M z3ZYgiVq23D#}yZ!T+@ItwB28Ow!Cpi?#xOFW7*l^pG%Bw%4$6*h`;aD4eVS~D*Bv< zdM`k}=hhso!7sg^MnYTh4Q)6R?!h56g`!EfsD`4X#@)@DVndQX2WgVzuvfN`1w-8t zC0UgkS~_Tr9I^I6^RH&c>J-gwDT!RMi?}JgO2QJ@nSo^~@35 z?af*G9p;FC>oYGILQovXeJWSb=CT4sVhX}E9$SK=;Tbg#z+5u5>F?~ zP8{YP>ykJ)SXVpUO$b4nhG1`L5{#R$WlX3eS+6uEiWk&OHz+PD)7ZqJ2&~}xU|Fot zUMUe8r3GEG^w@N}Y5nFG-Cxd1v8k&ErZcM4bln&3S4`Ti0OAY#dbK7&gMDg?guoOD z<~oB#V?xRqY2fH14}5@EeT^=4{}QkJ6|2m(x8rkL1Gi}?o8QMvBcp(y zvDj)mEvSC+zUhNnbY z(H)^HB?_c$xdmoq2*ayPV6TW;*%UAjj5-Uk^>R8p$2 z7w7P57Jk-2OIoPn+Lqj+Y8{(*rb$T9rw?<94R!OpF{I4)rmjav=w4zqOPj^SDjg`- zuWjbTC;ypBn|VFAko)|OJP=i{WQ7tFlcLY;8Exx8qlMfl%xXk3g9ZDIXo?@C78-8C=o2 zrPWO{9uAS()dPF9xV)if_QS?3VE3=5Ptk2J$gb|sr{h|bljWl!(7h3X`_@pWO-dpJ zf3HNwPvUIi*!Mo&Q)AL3QnTLk&l@(2%O71L@9zol-F@}y(}OcH6`TB?Z}(icIRWYK zpE+Ew=zEmya#o2gt%o>W19{v7WvCC;bDY|5{I!6fl4)u3Br$9?Qa&!m{@TNTpuOK%_Vo{n5t$F? 
zhJDecyhn^sJ;$sqKr0kUGU%qM7C9j4s!{>4)_5LGDU+=q&Tya5UE@*_M!Rbx|KSNNrzRBs5zQje#_L_%#NKR6ErCfkG4&f zQeJL|a=zIr`0W>VJY?lSBgW_erB$7ot;cri<^b}y^>;x&qr9;jcFBhpafwrYEEHk+ z2MbkH--t>IxgIaelnt@DnL46CcH5&d3f_CRo~dbfeBCLUv~Bog_b+}fNHB#?Wumcq z4DhYutYrQNLXDT5`CX-Iun@qSV;wpw9H%1+U+zHT{J0q+ZR%>2Hv|O3-R6cq?tgts z!y~*LA{eM-V1@wR|5vItbso0Gt=t|eV#^xx4y5=0)^o|(F7d#d`CKszuM_++E9=3R zKh@vw{sTE5*P3d`ZjgN;bp3svkixg|HoCU?vYNWoORK)ISh)?0wHM;O6a%YEe|8-* z36bFHbHfTZUy+5o^I{_6w|N)0Ewj?!sp3SYlnY7rZskz8p6oAdy%_9>rbAvmLGS=8 z&Ad-2AdGG3V(((>X_P-k>2JKc$>H6igKif9q1yodnFVH1(nXHloE(_5EWO|Oc3pO$ z4ZIKFe%%UYJLmpvN!g7}cx=A@Ju>;&Dmr#Z^2t2H&T;QAeiwB_qw@M;&-oMhG`9(5 z4njqkre)$|h`JwO?eTe&PJ~gKB|19&gxF-lhRpplwJFP+Q}JmQnOCgU;r^KtLU(kl zz!@u`eDpHGMV*D(14UWV_@Rl!BE zxKrwKNUq+vRrl!|UUKG^&2?%ex$AJsqoL40+0TfZ{aOmz&?5z_SM7lg=r0rI#2;xA zadKXLv>zY$2FR8`R5@^7A`uWjIjk-YfBV`8K>}Pf&%-UOLDU+rf#OOE!bz`xo_w>` z9`u&(a>&L^i=t(j*PD{xjNY*2g4aXoVj;&IHCxhmXLGfbU}+AkhSp7n8NT^FiJ4J2 zu#iw<;^NMcif#^0`2^#hnb+gY{}%h|pzXHgS7WU%w#ol~TwA|;GbH$Z{ddLr+vCR--0?Wdj8oUAZTtWK$4*0hc0h}3mR%1(%EmDTNOC`gvr>G9W6^VLi|+O=@=cVli@n_Cl?996I{ljI12mVENd z@MQeL@94$=eNyUbg3|zwosYQCc5!D@Pocc}ZC}R>t-Ts%c`8O>2@P99XC_a69TK=b zm_!z9UUd8D4q9uvA}z%mic~g<I#uBg9dp zaj9#&Pqdlviw1N#Tw^4S84x^@;f_$8SjeV)@2A}_BmgE^8J7!Ekh*HpMrd!EQ@jCl z2!R-3-x@|pJ(@dP{rD9#l>9Nfw7&t;IKruWCUnCX+Er)4Gq8;q%_}YtiBH&GAZG}L zu)14}%qPmUM(7DTqhqtyYeeEGR4G8)#$A_7d~u2ho2*Cu{IPZ^=oo~Fag6v?6)KQQ zwPwLiH(+c_1Q9xPJTJH6j1#kz+lKuoX9~Ih@WtzjQ>*dFt*72_dd1r$DSkwgSP_fP z&neGHKIHN5qiS@}k`?g9I++wwN?=DWSH!uFx?ertFDGKIAxm>2mQs5;cC|4FPu5*2X~5wXbv6Vv`wsXO-Ngfy^rHz_4hsdwKui z$aa9f`f{27_-Zh&MfYo5^a)X^lQU-E(Mu*w&s9#4+VSdmYAZzHz|M2QAg?^E{Hm84 z;rGK>zDHVWX!PM9b{<4&Yf1CtW*r*t%b?^`DdaE=W@?-4J3#nQ?0XQnv5Okx;y=J2 zm}!3lJMT^2DR>pVIQ6`WnKFR#Lpo20Qd`B7b|zU&_vZ8;a$q6zuARsZ=y^Hq4+Jyg zO6mimJv#5>r8nLtlCo5UJ8dEZ!ct>^jh$=h!TDeWSCfjj+hr6(PR(J|O?DDFv?^Qy z!er-V=ebJU*tYd*H3zp4tz^RWIcxw)vTFaoYuIcLBEQAjkM|UZ;M}Dz2IXlSV0r># z@J(eqzg~GSG}tiMPcwvu^|CNr`L$31x#=d?R5o`$EX4FCVa-TV6mOv;=r^<7FnSO3 z^CYm`YCoK2ZQ^6gvV2f^ee7a&D9(R`ohXbee!i))Iom?ryejaFM+=JWv@5*U|4|`% z?Wmjg{iXRvwc7`e%7VF*&#|oEon-kIc5Tr&U2{gH;DtH{d_lLx8gNE^oKnSVK7K{>REK}z8p z1@~=RB{jRi>x}ohwxBbWI#50#+7O%^Z+`rXo~*ZnL_eOMN!H>LyHle zUTKV_eqt?IIj$E7IQ9Ol)blRwZg6ie5RM*0w^m&%?k}EtwJ~5{TG|v~Br($tf1*uC zSuKQ5$NvUHJd4Hrbg?^j2Tu_kdl=)V(MH(zC3R(`!u zpN=o8^}c29nf61KUBG)?q>SS-#reh(>c+1t^byx{PBHS&OP`}ZVjkz6UkJAmYd3Ud z?GVNC(sOeMB-J#rsS+2vLtfORYSykm^v!QI5G8005@fM3eZ*SR1G>d`a8#q)@C9MY z2pPfmPMC>tnR(j9!oTH|lw4!vxRMVG`ZQ(!m;vf3p&ih*C57e7GYKkjy$!=0LLZ65 zMk8!;0BHIF$@ocWa*IbeG*A)1^t2hO(bR7mfD;IcfjDT;!U^`oR0rhzTbSOjWeCR| z^cP>S?0aIS2Zu$bXjfk>gez@Oi2Uw`!8mh5Rg8^9182u%%L|_yPUPHv*U!yO=&Xk1 z2=HK`6-AeG_)^u*uTtK}^zh(HS%dh&yr>D7%1uiYK*=DD{|e#pN5EbVYsNj{rPjP; zYv*Xrj_`rbutbk=n-@NUe1T7|psRFt(bHv_<& zvct!8enqahGWaMrEzau?K0+C$el+=bzHt{;(5U$SB@TYRLgZJxujy+^eTuv%%I)$w zF`f39Ql@Txv@TRYNUr8}Z+%=F26Rh3o_D2=C+G%XQO(Kl8&EPs$yHnc3gZ89ic66d zlI=PyrV@?CgGY{wXq2=FaHv^y{<@#QQwwrt+xm1z^xU8x6SH!6IMmkd(Kix`vPb#I zzMnu7y`h_@nkHDD}jc!v74ci~o?_>5x7shB>52cdshf<&xo8~R#-u*l?ekf_X5ThXOm7-jvmnUsZdj;`=`myU zeCR8*ICg}GnK2rdthc?Wt%AX`BZ-yt<3EX3AH1cln`=~gw3R9b+V;282t0c|gZGG{-k_2ydjp*XcYSTa|b zm#el2ZZZAiMqr3K!YpH#A~^?*zm_%5f2ZlQi6xUap5@`AZ>)Ax1@F#Oz8{j;a|wOe z1wYTyp(9S_eh7b|l=*FTuFU#T!VIJSLF}$^gENRhQ(TnG_VMZL++;}OotWAF+@lvN zB!Ns-5c+gWBy;Uc^YD}UcvEs!Y-*2^+OClVhMHP+R$I_pOw%gUF16@4-C=RLrF}wl z^pH(sl7GMwVzi=+YN8{9-^pib@5BUEl~l;<#yMeJfo(k+vNu1~jYjzoS;SqZQ3`*p zo3r>l4a>e0TEo`1iFKl}yjWYQK%IBFD*4*vI)}>fg9-ww3>IH@-kXN_xR=x?SMkYk z!QG+gEoQm$uyO7rU5R|%97*A8Ap&Sk3BxN>;lN#|U#d0P89)g-`o=;8qhNc(Zj5q8 zM&nCp8l{88Oc-G1i&x!4IdKptOHa?;y3)zYc-p)OKu!256lu#hDNT8_6<%RZnoKsj 
zHHcymH;jbl-sxHmTB#J4{UzcvrJ_sqU7}?VZ27^zk)_X7fHG7?w+-yLgp4NX59QXU z%7PcFpFqM0_*`7VW6LsflP4c~y6! zef@R~-FjO%%qR8p>Ve5$fwaq8D}ka2`Q^7t1LVML0WW^O5&ZFe2MZ-Rp+p?opgRgQ z86fb1C3Su}J$Z>2v|*wn{$!wANmD~3r8-T=djvJ^X4gM2cSN9UF>V+4`=oS#{3`_d zDm|TazL{5fwGg#u2?7*ow(YL+98>dy;W;1+%>p`2{w27tE%*aXZSjl6i-}=ztvhLR zt~|AR#QAS0;M@KuAW_PvId8G1i@6J5L#7^<+`J{Qy<8t_oy|N`^xg({FBt%1-|FCm z7^Sp5by!^YC%FBrR*v*!Ei%u|)-L*!WLvX#cdkJ8%F(l2UTZ;xytjwMV(T-Fk4xc6 zwHFT`qQgG`hRzSp6WyP>Jy?r;eTsT&Ufj}xm{%dIR#2}{hvL*s5ZOXy4m1;09NWff zXS|FB8LK8-N#cbU^r!=bMERLcr0BWIj~mQsKt+3JrNEOGYoYxQ)uq6K)>ZLJ9bflx z4}c1DXPa-G ztRiZpPR(D|RzLGY&N2!k#D-8|QSOila~&$5igKYJZ-{Hp<)7C`^X;qjocI22>?EE? z06wt$)F5TO&^qm9$;+9Fwe`ox)k+G+2`|Fz)iRxYGqdwE+gTF7{&j{V1-mlliN=if z>S${26o;~t;FyMI@Oc8mA5Uf$RF;R6l2&bJG50~z3Ewi=>2EdLKhOihvEe^Zsz^6S zSo51l7$Q{|19rpyetxBa*XZp0Y@Zb9g{FXr9fKhrAO=|-gujR6zmqiggC|?@;r1#v z%6gISCMMUO#VCFYIWHF(g(p7v{SPGhwOp$@&br`qqfD^7)kVb&)oSrw@NHFZ%LGD zEaLK$I(}PyJDEUn7SQL9ub7<%N(GN}k0p}DA~quTZ^sepu@mK+mXr(QuY-7-M5#3d z2jYo&O>dRvg45$d=)Wv612lTn{RJE_G%1^mjlM`unFv#0a5{}|=-~yi*>ZBMc+3Yg zhAv<0&*ATj+yz|+a2~FdHK#KCOLk%5-MYnE!k(wB>U5B6WU@z5A0PABnad=({pzXq5phx_5U^Y-^C!0LZOvd>>QFixedF7VR^vBBE*AV#j8x) zoM6w*@=tH&6V%=04=7Vs^eZW8&*)#po{X&rokTu35ZXN4J%9P%Xfx#e+7X(!m!H{i zSVo>WYZcTtn77&dW)=L!+|M23sEA^=X)^hA@^qQ(BH3f= zee(0aU13Z-uMEUi+aOH4K-}RW(x`7tv}Y=KZ)q+)X5t0cKhQ1C`-{`SBAB#!IC=i{ zzYEg>!o&_$hY2&=TPQ?p6X8ui!kceqo+*GLH$D_|qM(s@@qeJ^gp|ntOc20)_WvzO zK}H_P?g6TZm>TW(EIwe~Bh0M3g7%gD2)UYCox0BPCCQ7TK0eQ%vAM*G)h2ht?9=z7 zA9N~3{@2rtnl;y#_zxgmIdH7~>jY1AiEJM)5Vytc9i;DdWN3EfUCXNo!3T!LLGsrh zUwLs;wZF#8eKX*>oVn~{NfYSV)7lEn;#IUh_Y#woL+A?Eo`5U^?cbHpLqP{G+UXBS z4(3%*Ugs*X^Lie;fB%~2W0@DWNzucgjpT9d`Jz`XUgMTO+@98Q2b6yqN(2G!V=(9q z3)wt~tM9QSa^Dw@;+x3xucGN3DDHn8^&0*`VCK65tW^pZld7y#-*@{(HMb0HIco7*ph*} z7^a<%oCEw`0}Y5Y=7(7dU!MsRCMP?bwfp97U`t81!A{>B-!LmXwO)wQrdq$@wyftt zFt?~)9$uwL*JaG=VHZ;o@md$)I4>9XHG8g5k&ZghYyAW=1S7SnSvy6%VW13VRwM31 z;&oJ7=;}yO1KnjJ>Q&>1d|z`O2yGuAY+XWug9w|&cRB5&$r&|hyQlzyBBxb)SZFAR z(!i3xYP{9(y17bEE1$8c?fig8dq~K^vCe83LN_^jtE9R4FRWo6O|?qxsjJPEKA3ZnTAfNz;=Az|=@`a(wUz-HmW zEaOjROH5(cHl`*D7w2cHERKJr^@#9gBs-x$Hm!d?PEBhVsq*|7X1$VVdBAoY&m$QJ zs|-9Bz2=LD)07cdYs2p*(DtQ}l-dy}aKF$*{mHh~_ps|TPH+Y}?tQIdAqVXOxCBYd z6xnz4viyaCT~l1WrMH99rf7&hT%3VMjOUjL(uuU1MTBzk8UWVBHK(s-84cS!eB*zZHay* zK>ojz{8^m~4%>HY?ZOHObzUd;j(YYJYKw0s(UskAWjY%=WR9&feVLx;DI-x5rDH@lNbU4_lJgP&HNI1lt+Kv%sC z3ARtZX~f?{Pi?(iLa_`BGZ8cS;Bp0s_bTx=Pk3GTgbeqa-n$?5TN9?R&1*nz!}2DZ z2o0ZMZ3nCq?x8%t>^28-Zh0n7tXm={d`XnyXAoD;^c}atz$AkaA`CPqiKVw332+tp z&%@U0PAZ6HqtDzZAX`(UhaV0BLU#%_9%Sd5IEs{0?hs+2@JK1$t7ef*Ycsi-LsJr3 z>-~ER$#b76ks34a z&p41pS37)$k|V0C&uskuhsNZFYpGI+2lKYQ#VuMaH*m1!Qi$nKp0teTwE|jw%7P}G zcW2Y&qwTIYJlyw^I~Nv0miXxiq7M_}sUtIC880!Y03d~B5y+Ri7x#3{FvZ65&$j*( zNfjQYiv5B&FTNhP>{nao^0YlmD!`G<9ZY)n6%sv0cb Date: Thu, 10 Dec 2015 11:32:09 +0100 Subject: [PATCH 73/79] Fix generic arguments in BatchProvider --- BatchProviderIC.lua | 4 ++-- BatchProviderRC.lua | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/BatchProviderIC.lua b/BatchProviderIC.lua index e79fb6f..da34d77 100644 --- a/BatchProviderIC.lua +++ b/BatchProviderIC.lua @@ -7,10 +7,10 @@ local env = require 'argcheck.env' -- retrieve argcheck environement -- which can be overrided by the user function env.istype(obj, typename) if typename == 'DataSet' then - return obj._isDataSet + return obj and obj._isDataSet end if typename == 'FeatureProvider' then - return obj._isFeatureProvider + return obj and obj._isFeatureProvider end return torch.type(obj) == typename end diff --git a/BatchProviderRC.lua b/BatchProviderRC.lua index a090abd..2770036 
From b890a4e6dae690c1f61c98a7ebf2120b370ed3a6 Mon Sep 17 00:00:00 2001
From: Francisco Massa
Date: Thu, 10 Dec 2015 11:36:00 +0100
Subject: [PATCH 74/79] Updating main script. Needs further testing

---
 config.lua |  50 +++++++++++++-------------
 data.lua   |  97 ++++++++++++++++++++----------------------------
 main.lua   |  61 +++++++++++++++-----------------
 model.lua  |  57 ++++++------------------------
 opts.lua   |  69 +++++++++++-------------------------
 train.lua  | 101 ++++++++++++++++++++---------------------------------
 6 files changed, 167 insertions(+), 268 deletions(-)

diff --git a/config.lua b/config.lua
index 806e76a..0e0ea08 100644
--- a/config.lua
+++ b/config.lua
@@ -1,3 +1,4 @@
+require 'nnf'
 
 local configs = {}
 
@@ -8,6 +9,28 @@ local image_transformer_params = {
 }
 configs.image_transformer_params = image_transformer_params
 
+
+configs.datasetDir = 'datasets/VOCdevkit'
+configs.roidbDir = 'data/selective_search_data'
+
+--------------------------------------------------------------------------------
+-- Training Parameters
+--------------------------------------------------------------------------------
+
+local train_params = {
+  batch_size = 16,--128,
+  fg_fraction = 0.25,
+  fg_threshold = 0.5,
+  bg_threshold = {0.0,0.5},
+  do_flip = true,
+}
+
+configs.train_params = train_params
+
+--------------------------------------------------------------------------------
+-- Feature Provider Parameters
+--------------------------------------------------------------------------------
+
 configs.algo = {}
 
 --------------------------------------------------------------------------------
@@ -18,23 +41,16 @@ local fp_params = {
   crop_size = 227,
   padding = 16,
   use_square = false,
-  image_transformer = image_transformer
 }
 local bp_params = {
   iter_per_batch = 100,
   nTimesMoreData = 10,
-  batch_size = opt.batch_size,
-  fg_fraction = opt.fg_frac,
-  fg_threshold = 0.5,
-  bg_threshold = {0.0,0.5},
-  do_flip = true,
---  batch_dim = {3,fp_params.crop_size,fp_params.crop_size},
 }
 
 local RCNN = {
   fp_params=fp_params,
   bp_params=bp_params,
-  bp = nnf.BatchProvider
+  bp = nnf.BatchProviderRC
 }
 configs.algo.RCNN = RCNN
 
@@ -51,7 +67,6 @@ local feat_dim = {num_chns*pooled_size}
 
 local fp_params = {
   scales = {480,576,688,874,1200},
-  randomscale = true,
   sz_conv_standard = 13,
   step_standard = 16,
   offset0 = 21,
@@ -59,23 +74,16 @@
   inputArea = 224^2,
   pooling_scales = pooling_scales,
   num_feat_chns = num_chns,
-  image_transformer = image_transformer
 }
 local bp_params = {
   iter_per_batch = 500,
   nTimesMoreData = 10,
-  batch_size = opt.batch_size,
-  fg_fraction = opt.fg_frac,
-  fg_threshold = 0.5,
-  bg_threshold = {0.1,0.5},
-  do_flip = true,
---  batch_dim = feat_dim,
 }
 
 local SPP = {
   fp_params=fp_params,
   bp_params=bp_params,
-  bp = nnf.BatchProvider
+  bp = nnf.BatchProviderRC
 }
 configs.algo.SPP = SPP
 
@@ -87,21 +95,15 @@ configs.algo.SPP = SPP
 
 local fp_params = {
   scale = {600},
   max_size = 1000,
-  image_transformer = image_transformer
 }
 local bp_params = {
   imgs_per_batch = 2,
-  batch_size = opt.batch_size,
-  fg_fraction = opt.fg_frac,
-  fg_threshold = 0.5,
- bg_threshold = {0.0,0.5}, - do_flip = true, } local FRCNN = { fp_params=fp_params, bp_params=bp_params, - bp = nnf.BatchProviderROI + bp = nnf.BatchProviderIC } configs.algo.FRCNN = FRCNN diff --git a/data.lua b/data.lua index 04569db..59d3284 100644 --- a/data.lua +++ b/data.lua @@ -1,78 +1,65 @@ -------------------------------------------------------------------------------- -- Prepare data model -------------------------------------------------------------------------------- -paths.mkdir(opt.save) -trainCache = paths.concat(opt.save_base,'trainCache.t7') -testCache = paths.concat(opt.save_base,'testCache.t7') +local trainCache = paths.concat(rundir,'trainCache.t7') +--testCache = paths.concat(opt.save_base,'testCache.t7') -local pooler -local feat_dim ---[[ -if opt.algo == 'SPP' then - local conv_list = features:findModules(opt.backend..'.SpatialConvolution') - local num_chns = conv_list[#conv_list].nOutputPlane - pooler = model:get(2):clone():float() - local pyr = torch.Tensor(pooler.pyr):t() - local pooled_size = pyr[1]:dot(pyr[2]) - feat_dim = {num_chns*pooled_size} -elseif opt.algo == 'RCNN' then - feat_dim = {3,227,227} -end ---]] - -image_transformer = nnf.ImageTransformer{mean_pix=image_mean} +local config = paths.dofile('config.lua') +image_transformer = nnf.ImageTransformer(config.image_transformer_params) local FP = nnf[opt.algo] local fp_params = config.algo[opt.algo].fp_params local bp_params = config.algo[opt.algo].bp_params local BP = config.algo[opt.algo].bp -if paths.filep(trainCache) then - print('Loading train metadata from cache') - batch_provider = torch.load(trainCache) - feat_provider = batch_provider.feat_provider - ds_train = feat_provider.dataset - feat_provider.model = features -else - ds_train = nnf.DataSetPascal{image_set='trainval',classes=classes,year=opt.year, - datadir=opt.datadir,roidbdir=opt.roidbdir} - - - feat_provider = FP(fp_params) - batch_provider = BP(bp_params) - batch_provider:setupData() +local train_params = config.train_params - torch.save(trainCache,batch_provider) - feat_provider.model = features +-- add common parameters +fp_params.image_transformer = image_transformer +for k,v in pairs(train_params) do + bp_params[k] = v end -if paths.filep(testCache) then - print('Loading test metadata from cache') - batch_provider_test = torch.load(testCache) - feat_provider_test = batch_provider_test.feat_provider - ds_test = feat_provider_test.dataset - feat_provider_test.model = features -else - ds_test = nnf.DataSetPascal{image_set='test',classes=classes,year=opt.year, - datadir=opt.datadir,roidbdir=opt.roidbdir} +------------------------------------------------------------------------------- +-- Create structures +-------------------------------------------------------------------------------- +ds_train = nnf.DataSetPascal{ + image_set='trainval', + year=2007,--opt.year, + datadir=config.datasetDir, + roidbdir=config.roidbDir +} - feat_provider_test = FP(fp_params) - -- disable flip ? 
-  bp_params.do_flip = false
-  batch_provider_test = BP(bp_params)
+feat_provider = FP(fp_params)
+feat_provider:training()
 
-  batch_provider_test:setupData()
-
-  torch.save(testCache,batch_provider_test)
-  feat_provider_test.model = features
+bp_params.dataset = ds_train
+bp_params.feat_provider = feat_provider
+batch_provider = BP(bp_params)
+
+if paths.filep(trainCache) then
+  print('Loading train metadata from cache')
+  local metadata = torch.load(trainCache)
+  batch_provider.bboxes = metadata
+else
+  batch_provider:setupData()
+  torch.save(trainCache, batch_provider.bboxes)
 end
 
--- compute feature cache
+-- test
+ds_test = nnf.DataSetPascal{
+  image_set='test',
+  year=2007,--opt.year,
+  datadir=config.datasetDir,
+  roidbdir=config.roidbDir
+}
 
-features = nil
-model = nil
+-- only needed because of SPP
+-- could be the same as the one for training
+--feat_provider_test = FP(fp_params)
+--feat_provider_test:evaluate()
 
 collectgarbage()

diff --git a/main.lua b/main.lua
index 2320090..0a8705b 100644
--- a/main.lua
+++ b/main.lua
@@ -1,6 +1,7 @@
 require 'nnf'
-require 'cunn'
+--require 'cunn'
 require 'optim'
+require 'trepl'
 
 local opts = paths.dofile('opts.lua')
 opt = opts.parse(arg)
@@ -8,51 +9,47 @@ print(opt)
 
 if opt.seed ~= 0 then
   torch.manualSeed(opt.seed)
-  cutorch.manualSeed(opt.seed)
+  if opt.gpu > 0 then
+    cutorch.manualSeed(opt.seed)
+  end
 end
-cutorch.setDevice(opt.gpu)
 torch.setnumthreads(opt.numthreads)
 
--------------------------------------------------------------------------------
--- Select target classes
--------------------------------------------------------------------------------
-
-if opt.classes == 'all' then
-  classes={'aeroplane','bicycle','bird','boat','bottle','bus','car',
-           'cat','chair','cow','diningtable','dog','horse','motorbike',
-           'person','pottedplant','sheep','sofa','train','tvmonitor'}
+local tensor_type
+if opt.gpu > 0 then
+  require 'cunn'
+  cutorch.setDevice(opt.gpu)
+  tensor_type = 'torch.CudaTensor'
+  print('Using GPU mode on device '..opt.gpu)
 else
-  classes = {opt.classes}
+  require 'nn'
+  tensor_type = 'torch.FloatTensor'
+  print('Using CPU mode')
 end
 
--------------------------------------------------------------------------------
+model, criterion = paths.dofile('model.lua')
+model:type(tensor_type)
+criterion:type(tensor_type)
 
-paths.dofile('model.lua')
+-- prepare training and test data
 paths.dofile('data.lua')
 
--------------------------------------------------------------------------------
--- Prepare training model
--------------------------------------------------------------------------------
+-- Do training
 paths.dofile('train.lua')
 
-ds_train.roidb = nil
-collectgarbage()
-collectgarbage()
-
--------------------------------------------------------------------------------
--- Do full evaluation
--------------------------------------------------------------------------------
-
-print('==> Evaluation')
-if opt.algo == 'FRCNN' then
-  tester = nnf.Tester_FRCNN(model,feat_provider_test)
-else
-  tester = nnf.Tester(classifier,feat_provider_test)
-end
-tester.cachefolder = paths.concat(opt.save,'evaluation',ds_test.dataset_name)
+-- evaluation
+print('==> Evaluating')
+-- add softmax to classifier, because we were using nn.CrossEntropyCriterion
+local softmax = nn.SoftMax()
+softmax:type(tensor_type)
+model:add(softmax)
+feat_provider:evaluate()
+-- define the class to test the model on the full dataset
+tester = nnf.Tester(model, feat_provider, ds_test)
+tester.cachefolder = rundir
 tester:test(opt.num_iter)
-

diff --git a/model.lua b/model.lua
index 36812e1..029c8a3 100644 --- a/model.lua +++ b/model.lua @@ -1,61 +1,26 @@ require 'nn' -require 'inn' -require 'cudnn' -local reshapeLastLinearLayer = paths.dofile('utils.lua').reshapeLastLinearLayer -local convertCaffeModelToTorch = paths.dofile('utils.lua').convertCaffeModelToTorch +--require 'inn' +--require 'cudnn' --- 1.1. Create Network -local config = opt.netType -local createModel = paths.dofile('models/' .. config .. '.lua') -print('=> Creating model from file: models/' .. config .. '.lua') -model = createModel(opt.backend) +local createModel = paths.dofile('models/' .. opt.netType .. '.lua') +print('=> Creating model from file: models/' .. opt.netType .. '.lua') +local model = createModel() --- convert to accept inputs in the range 0-1 RGB format -convertCaffeModelToTorch(model,{1,1}) +local criterion = nn.CrossEntropyCriterion() -reshapeLastLinearLayer(model,#classes+1) -image_mean = {128/255,128/255,128/255} - -if opt.algo == 'RCNN' then - classifier = model -elseif opt.algo == 'SPP' then - features = model:get(1) - classifier = model:get(3) -elseif opt.algo == 'FRCNN' then - local temp = nn.Sequential() - local features = model:get(1) - local classifier = model:get(3) - local prl = nn.ParallelTable() - prl:add(features) - prl:add(nn.Identity()) - temp:add(prl) - temp:add(nnf.ROIPooling(7,7)) - temp:add(nn.View(-1):setNumInputDims(3)) - temp:add(classifier) -end - --- 2. Create Criterion -criterion = nn.CrossEntropyCriterion() - -print('=> Model') +print('Model:') print(model) - -print('=> Criterion') +print('Criterion:') print(criterion) --- 3. If preloading option is set, preload weights from existing models appropriately +-- If preloading option is set, preload weights from existing models appropriately if opt.retrain ~= 'none' then assert(paths.filep(opt.retrain), 'File not found: ' .. opt.retrain) print('Loading model from file: ' .. opt.retrain); - classifier = torch.load(opt.retrain) + model = torch.load(opt.retrain) end --- 4. Convert model to CUDA -print('==> Converting model to CUDA') -model = model:cuda() -criterion:cuda() - collectgarbage() - +return model, criterion diff --git a/opts.lua b/opts.lua index 3665874..457b6f2 100644 --- a/opts.lua +++ b/opts.lua @@ -8,56 +8,29 @@ function M.parse(arg) cmd:text() cmd:text('Options:') - local curr_dir = paths.cwd() - local defaultDataSetDir = paths.concat(curr_dir,'datasets') - local defaultDataDir = paths.concat(defaultDataSetDir,'VOCdevkit/') - local defaultROIDBDir = paths.concat(curr_dir,'data','selective_search_data/') - - cmd:text('Folder parameters') - cmd:option('-cache',paths.concat(curr_dir,'cachedir'),'Cache dir') - cmd:option('-datadir',defaultDataDir,'Path to dataset') - cmd:option('-roidbdir',defaultROIDBDir,'Path to ROIDB') - cmd:text() - cmd:text('Model parameters') - cmd:option('-algo','SPP','Detection framework. 
Options: RCNN | SPP') - cmd:option('-netType','zeiler','Options: zeiler | vgg') - cmd:option('-backend','cudnn','Options: nn | cudnn') - cmd:text() - cmd:text('Data parameters') - cmd:option('-year',2007,'DataSet year (for Pascal)') - cmd:option('-ipb',500,'iter per batch') - cmd:option('-ntmd',10,'nTimesMoreData') - cmd:option('-fg_frac',0.25,'fg_fraction') - cmd:option('-classes','all','use all classes (all) or given class') - cmd:text() - cmd:text('Training parameters') - cmd:option('-lr',1e-2,'learning rate') - cmd:option('-num_iter',300,'number of iterations') - cmd:option('-nsmooth',40,'number of iterations before reducing learning rate') - cmd:option('-nred',4,'number of divisions by 2 before stopping learning') - cmd:option('-nildfdx',false,'erase memory of gradients when reducing learning rate') - cmd:option('-batch_size',128,'batch size') - cmd:text() - cmd:text('Others') - cmd:option('-gpu',1,'gpu device to use') - cmd:option('-numthreads',6,'number of threads to use') - cmd:option('-comment','','additional comment to the name') - cmd:option('-seed',0,'random seed (0 = no fixed seed)') - cmd:option('-retrain','none','modelpath for finetuning') - cmd:text() - + cmd:option('-name', 'obj-detect', 'base name') + cmd:option('-algo', 'RCNN', 'Detection framework. Options: RCNN | FRCNN') + cmd:option('-netType', 'alexnet', 'Options: alexnet') + cmd:option('-lr', 1e-3, 'learning rate') + cmd:option('-num_iter', 40000, 'number of iterations') + cmd:option('-disp_iter', 100, 'display every n iterations') + cmd:option('-lr_step', 30000, 'step for reducing the learning rate') + cmd:option('-save_step', 10000, 'step for saving the model') + cmd:option('-gpu', 1, 'gpu to use (0 for cpu mode)') + cmd:option('-conf_mat', false, 'Compute confusion matrix during training') + cmd:option('-seed', 1, 'fix random seed (if ~= 0)') + cmd:option('-numthreads',6, 'number of threads') + cmd:option('-retrain', 'none', 'modelpath for finetuning') local opt = cmd:parse(arg or {}) - -- add commandline specified options - opt.save = paths.concat(opt.cache, - cmd:string(opt.netType, opt, - {retrain=true, optimState=true, cache=true, - data=true, gpu=true, numthread=true, - netType=true})) - -- add date/time - opt.save_base = opt.save - local date_time = os.date():gsub(' ','') - opt.save = paths.concat(opt.save, date_time) + + local exp_name = cmd:string(opt.name, opt, {name=true, gpu=true, numthreads=true}) + + rundir = 'cachedir/'..exp_name + paths.mkdir(rundir) + + cmd:log(paths.concat(rundir,'log'), opt) + cmd:addTime('Object-Detection.Torch') return opt diff --git a/train.lua b/train.lua index ae2891f..8184922 100644 --- a/train.lua +++ b/train.lua @@ -1,74 +1,49 @@ +trainer = nnf.Trainer(model, criterion, batch_provider) +local num_iter = opt.num_iter/opt.disp_iter +local lr_step = opt.lr_step/opt.disp_iter +local save_step = opt.save_step/opt.disp_iter -local savedModel = model:clone('weight','bias','running_mean','running_std') - -trainer = nnf.Trainer(classifier,criterion,batch_provider) trainer.optimState.learningRate = opt.lr -local conf_classes = {} -table.insert(conf_classes,'background') -for i=1,#classes do - table.insert(conf_classes,classes[i]) -end -trainer.confusion = optim.ConfusionMatrix(conf_classes) - ---[[ -validator = nnf.Tester(classifier,feat_provider_test) -validator.cachefolder = opt.save_base -validator.cachename = 'validation_data.t7' -validator.batch_provider = batch_provider_test ---]] -logger = optim.Logger(paths.concat(opt.save,'log.txt')) -val_err = {} -val_counter = 0 
-reduc_counter = 0 - -inputs = torch.FloatTensor() -targets = torch.IntTensor() -for i=1,opt.num_iter do - - print('Iteration: '..i..'/'..opt.num_iter) - inputs,targets = batch_provider:getBatch(inputs,targets) - print('==> Training '..paths.basename(opt.save_base)) - trainer:train(inputs,targets) - print('==> Training Error: '..trainer.fx[i]) - print(trainer.confusion) - - collectgarbage() +logger = optim.Logger(paths.concat(rundir,'train.log')) - --err = validator:validate(criterion) - --print('==> Validation Error: '..err) - --table.insert(val_err,err) +if opt.conf_mat then + local conf_classes = {'background'} + for k,v in ipairs(ds_train.classes) do + table.insert(conf_classes,v) + end + trainer.confusion = optim.ConfusionMatrix(conf_classes) +end - logger:add{['train error (iters per batch='..batch_provider.iter_per_batch.. - ')']=trainer.fx[i],['val error']=err, - ['learning rate']=trainer.optimState.learningRate} +local lightModel = model:clone('weight','bias','running_mean','running_std') - val_counter = val_counter + 1 +-- main training loop +for i=1,num_iter do + if i % lr_step == 0 then + trainer.optimState.learningRate = trainer.optimState.learningRate/10 + end + print(('Iteration %3d/%-3d'):format(i,num_iter)) + trainer:train(opt.disp_iter) + print((' Training error: %.5f'):format(trainer.fx[i])) + + if opt.conf_mat then + print(trainer.confusion) + logger:add{ + ['train error']=trainer.fx[i], + ['confusion matrix']=tostring(trainer.confusion), + ['learning rate']=trainer.optimState.learningRate + } + else + logger:add{ + ['train error']=trainer.fx[i], + ['learning rate']=trainer.optimState.learningRate + } + end - --[[ - local val_err_t = torch.Tensor(val_err) - local _,lmin = val_err_t:min(1) - if val_counter-lmin[1] >= opt.nsmooth then - print('Reducing learning rate') - trainer.optimState.learningRate = trainer.optimState.learningRate/2 - if opt.nildfdx == true then - trainer.optimState.dfdx= nil - end - val_counter = 0 - val_err = {} - reduc_counter = reduc_counter + 1 - if reduc_counter >= opt.nred then - print('Stopping training at iteration '..i) - break - end + if i% save_step == 0 then + torch.save(paths.concat(rundir, 'model.t7'), lightModel) end ---]] - collectgarbage() - collectgarbage() - torch.save(paths.concat(opt.save, 'model_' .. epoch .. '.t7'), savedModel) - --torch.save(paths.concat(opt.save, 'optimState_' .. epoch .. 
'.t7'), trainer.optimState) end -torch.save(paths.concat(opt.save, 'model.t7'), savedModel) - +torch.save(paths.concat(rundir, 'model.t7'), lightModel) From db302507f092c9351567caf6c8217aed8229e47d Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Thu, 10 Dec 2015 11:57:42 +0100 Subject: [PATCH 75/79] README tweak --- README.md | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 53405c8..5c06f5a 100644 --- a/README.md +++ b/README.md @@ -137,6 +137,15 @@ model:add(nn.SoftMax()) model:evaluate() model:cuda() +-- prepare detector +image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, + raw_scale = 255, + swap = {3,2,1}} +feat_provider = nnf.FRCNN{image_transformer=image_transformer} +feat_provider:evaluate() -- testing mode + +detector = nnf.ImageDetect(model, feat_provider) + -- Load an image I = image.lena() -- generate some random bounding boxes @@ -147,12 +156,7 @@ bboxes:select(2,2):random(1,I:size(2)/2) bboxes:select(2,3):random(I:size(3)/2+1,I:size(3)) bboxes:select(2,4):random(I:size(2)/2+1,I:size(2)) -image_transformer= nnf.ImageTransformer{mean_pix={102.9801,115.9465,122.7717}, - raw_scale = 255, - swap = {3,2,1}} -feat_provider = nnf.FRCNN{image_transformer=image_transformer} - -detector = nnf.ImageDetect(model, feat_provider) +-- detect ! scores, bboxes = detector:detect(I, bboxes) -- visualization From 3f072d287fe65caff92ccda8968ccf64a060806a Mon Sep 17 00:00:00 2001 From: Francisco Massa Date: Sat, 30 Jan 2016 12:14:04 +0100 Subject: [PATCH 76/79] Update Fast R-CNN model --- models/frcnn_alexnet.lua | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/models/frcnn_alexnet.lua b/models/frcnn_alexnet.lua index 93dbeff..a2c38a6 100644 --- a/models/frcnn_alexnet.lua +++ b/models/frcnn_alexnet.lua @@ -1,6 +1,6 @@ local function loadModel(params,backend) - backend = backend or cudnn + backend = backend or nn local features = nn.Sequential() local classifier = nn.Sequential() @@ -8,14 +8,12 @@ local function loadModel(params,backend) features:add(backend.SpatialConvolution(3,96,11,11,4,4,5,5,1)) features:add(backend.ReLU(true)) features:add(backend.SpatialMaxPooling(3,3,2,2,1,1)) - --features:add(backend.SpatialCrossMapLRN(5,0.0001,0.75,1)) - features:add(inn.SpatialCrossResponseNormalization(5,0.0001,0.75,1)) + features:add(backend.SpatialCrossMapLRN(5,0.0001,0.75,1)) features:add(backend.SpatialConvolution(96,256,5,5,1,1,1,1,2)) features:add(backend.ReLU(true)) features:add(backend.SpatialMaxPooling(3,3,2,2,1,1)) - --features:add(backend.SpatialCrossMapLRN(5,0.0001,0.75,1)) - features:add(inn.SpatialCrossResponseNormalization(5,0.0001,0.75,1)) + features:add(backend.SpatialCrossMapLRN(5,0.0001,0.75,1)) features:add(backend.SpatialConvolution(256,384,3,3,1,1,1,1,1)) features:add(backend.ReLU(true)) @@ -25,7 +23,6 @@ local function loadModel(params,backend) features:add(backend.SpatialConvolution(384,256,3,3,1,1,1,1,2)) features:add(backend.ReLU(true)) - --features:add(backend.SpatialMaxPooling(3,3,2,2,1,1)) classifier:add(nn.Linear(9216,4096)) classifier:add(backend.ReLU(true)) @@ -49,17 +46,16 @@ local function loadModel(params,backend) model:add(nn.View(-1):setNumInputDims(3)) model:add(classifier) - - local lparams = model:parameters() - - assert(#lparams == #params, 'provided parameters does not match') - - for k,v in ipairs(lparams) do - local p = params[k] - assert(p:numel() == v:numel(), 'wrong number of parameter elements !') - v:copy(p) + if 
params then
+    local lparams = model:parameters()
+    assert(#lparams == #params, 'provided parameters does not match')
+
+    for k,v in ipairs(lparams) do
+      local p = params[k]
+      assert(p:numel() == v:numel(), 'wrong number of parameter elements !')
+      v:copy(p)
+    end
   end
-
   return model
 end

From b949c6a5be0433ec8594e6b55fbb4c96635f41ab Mon Sep 17 00:00:00 2001
From: Francisco Massa
Date: Sat, 30 Jan 2016 12:18:47 +0100
Subject: [PATCH 77/79] Fix link in README

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 5c06f5a..eb80c08 100644
--- a/README.md
+++ b/README.md
@@ -175,7 +175,7 @@ This outputs the following
 
 ![Lena](examples/example_frcnn_lena.jpg)
 
-For an illustration on how to use this code to train a detector, or to evaluate it on Pascal, see the [examples](http://github.com/fmassa/object-detection.torch/tree/master/examples).
+For an illustration on how to use this code to train a detector, or to evaluate it on Pascal, see the [examples](http://github.com/fmassa/object-detection.torch/tree/refactoring/examples).
 
 #### Bounding box proposals
 Note that this repo doesn't contain code for generating bounding box proposals. For the moment, they are pre-computed and loaded at run time.

From 83da6a52aa652348748cddd50b4cf408421fd0d5 Mon Sep 17 00:00:00 2001
From: Francisco Massa
Date: Tue, 8 Mar 2016 17:56:02 +0100
Subject: [PATCH 78/79] Update frcnn_alexnet.lua

Temporary fix for https://github.com/fmassa/object-detection.torch/issues/18
I should add groups to `nn` soon.
---
 models/frcnn_alexnet.lua | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/models/frcnn_alexnet.lua b/models/frcnn_alexnet.lua
index a2c38a6..c8b033d 100644
--- a/models/frcnn_alexnet.lua
+++ b/models/frcnn_alexnet.lua
@@ -1,6 +1,6 @@
 local function loadModel(params,backend)
 
-  backend = backend or nn
+  backend = backend or cudnn
 
   local features = nn.Sequential()
   local classifier = nn.Sequential()
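A note on the incompatibility this works around: the trailing ninth argument in calls such as backend.SpatialConvolution(96,256,5,5,1,1,1,1,2) is the number of convolution groups used by the original Caffe AlexNet. cudnn.torch's SpatialConvolution accepts that argument, while nn.SpatialConvolution has no groups parameter, so plain Lua silently drops the extra argument and builds an ungrouped layer. A rough sketch of the resulting size mismatch (it assumes the cudnn.torch bindings are installed; the assert message is the one from loadModel above):

  require 'nn'
  require 'cudnn' -- needs the cudnn.torch bindings

  -- 9th argument = groups; honoured by cudnn, silently ignored by nn
  local grouped = cudnn.SpatialConvolution(96,256,5,5,1,1,1,1,2)
  local full    = nn.SpatialConvolution(96,256,5,5,1,1,1,1,2)
  print(grouped.weight:numel()) -- 256*(96/2)*5*5 = 307200 weights
  print(full.weight:numel())    -- 256* 96   *5*5 = 614400 weights
  -- Copying the grouped pre-trained weights into the nn layer therefore
  -- trips assert(p:numel() == v:numel(), 'wrong number of parameter elements !')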
From a0a4a51a6983b09f55d71cb48c66410dcf406fb0 Mon Sep 17 00:00:00 2001
From: fsuzanomassa
Date: Wed, 13 Apr 2016 14:10:53 +0200
Subject: [PATCH 79/79] Explicitly require modelpath in example

---
 examples/train_test_rcnn.lua | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/examples/train_test_rcnn.lua b/examples/train_test_rcnn.lua
index 9846d6a..7701ad6 100644
--- a/examples/train_test_rcnn.lua
+++ b/examples/train_test_rcnn.lua
@@ -5,6 +5,7 @@ cmd:text('Example on how to train/test a RCNN based object detector on Pascal')
 cmd:text('')
 cmd:text('Options:')
 cmd:option('-name', 'rcnn-example', 'base name')
+cmd:option('-modelpath', '', 'path to the pre-trained model')
 cmd:option('-lr', 1e-3, 'learning rate')
 cmd:option('-num_iter', 40000, 'number of iterations')
 cmd:option('-disp_iter', 100, 'display every n iterations')
@@ -16,7 +17,10 @@ cmd:option('-numthreads',6, 'number of threads')
 
 opt = cmd:parse(arg or {})
 
-exp_name = cmd:string(opt.name, opt, {name=true, gpu=true, numthreads=true})
+assert(paths.filep(opt.modelpath), 'need to provide the path for the pre-trained model')
+
+exp_name = cmd:string(opt.name, opt, {name=true, gpu=true, numthreads=true,
+                      modelpath=true})
 
 rundir = '../cachedir/'..exp_name
 paths.mkdir(rundir)
@@ -49,8 +53,13 @@ torch.setnumthreads(opt.numthreads)
 --------------------------------------------------------------------------------
 -- define model and criterion
 --------------------------------------------------------------------------------
-local createModel = paths.dofile('../models/alexnet.lua')
-model = createModel()
+-- load pre-trained model for finetuning
+-- should already have the right number of outputs in the last layer,
+-- which can be done by removing the last layer and replacing it by a new one
+-- for example:
+-- pre_trained_model:remove() -- remove last layer
+-- pre_trained_model:add(nn.Linear(4096,21)) -- add new layer
+model = torch.load(opt.modelpath)
 
 criterion = nn.CrossEntropyCriterion()
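To make the comments in this last patch concrete: a pre-trained ImageNet model can be adapted before being handed to -modelpath roughly as below. This is a sketch only; the file names are hypothetical placeholders, and the 21 outputs correspond to the 20 Pascal VOC classes plus background, matching the nn.Linear(4096,21) from the comment above.

  require 'nn'
  -- hypothetical preparation of the file passed via -modelpath
  local m = torch.load('alexnet_imagenet.t7')  -- placeholder file name
  m:remove()                  -- drop the old 1000-way classification layer
  m:add(nn.Linear(4096, 21))  -- 20 Pascal classes + background
  torch.save('alexnet_voc_init.t7', m)         -- placeholder file name
  -- then: th train_test_rcnn.lua -modelpath alexnet_voc_init.t7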