Use the same instance of iSize to fix soumith#155
jonathanasdf committed Apr 6, 2016
1 parent 728d477 commit 9803eb5
Showing 10 changed files with 29 additions and 16 deletions.
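
All ten files make the same change in two parts: allocate self.iSize once in __init (zero-filled, so it can never match a real input size, which forces descriptor creation on the first forward pass), and thereafter update it in place with :copy() instead of rebinding the field to the fresh LongStorage that input:size() returns. A minimal standalone sketch of the difference (illustrative names, not the actual module code); the per-file hunks below all follow this pattern:

    require 'torch'

    -- Illustrative sketch only: 'module' and the helpers are hypothetical.
    -- Before the fix: `self.iSize = input:size()` rebound the field to a
    -- brand-new LongStorage on every resize, so anything still holding the
    -- old storage (a clone, a cached reference, a module that was never
    -- given an iSize in __init) went stale or hit a nil field.
    local module = { iSize = torch.LongStorage(4):fill(0) }

    local function sizeChanged(m, input)
       for d = 1, 4 do
          if input:size(d) ~= m.iSize[d] then return true end
       end
       return false
    end

    local function createIODescriptors(m, input)
       if sizeChanged(m, input) then
          -- the fix: mutate the one storage allocated in __init ...
          m.iSize:copy(input:size())
          -- ... instead of `m.iSize = input:size()`, which rebinds the
          -- field to a new instance on each call.
       end
    end

    createIODescriptors(module, torch.randn(16, 3, 32, 32))
    print(module.iSize)  -- 16 3 32 32; same storage instance as at construction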
BatchNormalization.lua (2 additions, 1 deletion)

@@ -16,6 +16,7 @@ function BatchNormalization:__init(nFeature, eps, momentum, affine)
    self.eps = eps or 1e-5
    self.train = true
    self.momentum = momentum or 0.1
+   self.iSize = torch.LongStorage(self.nDim):fill(0)

    self.running_mean = torch.zeros(nFeature)
    self.running_std = torch.ones(nFeature)
@@ -45,7 +46,7 @@ function BatchNormalization:createIODescriptors(input)
       'Only CUDA tensors are supported for cudnn.BatchNormalization!')
    if not self.iDesc or not self.oDesc or not input:isSize(self.iSize) then
       local nFeature = self.running_mean:numel()
-      self.iSize = input:size()
+      self.iSize:copy(input:size())
       self.output:resizeAs(input)
       self.gradInput:resizeAs(input)
       self.iDesc = cudnn.toDescriptor(input)
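
A side note on the check above: unlike the other modules, BatchNormalization compares sizes with input:isSize(self.iSize) rather than element by element. Either way, the zero-filled storage from __init can never match a real input. A small sketch (standalone, not the module's code):

    require 'torch'

    local input = torch.randn(4, 16, 8, 8)
    local iSize = torch.LongStorage(4):fill(0)
    print(input:isSize(iSize))  -- false: a zeroed size never matches,
                                -- so descriptors get built
    iSize:copy(input:size())
    print(input:isSize(iSize))  -- true: cached descriptors are reused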
Pooling.lua (1 addition, 1 deletion)

@@ -52,7 +52,7 @@ function Pooling:createIODescriptors(input)
    if not self.iDesc or not self.oDesc or
       input:size(1) ~= self.iSize[1] or input:size(2) ~= self.iSize[2]
       or input:size(3) ~= self.iSize[3] or input:size(4) ~= self.iSize[4] then
-      self.iSize = input:size()
+      self.iSize:copy(input:size())
       -- resize gradInput
       self.gradInput:resizeAs(input)
       -- resize output
Pooling3D.lua (1 addition, 1 deletion)

@@ -57,7 +57,7 @@ function Pooling:createIODescriptors(input)
       input:size(1) ~= self.iSize[1] or input:size(2) ~= self.iSize[2]
       or input:size(3) ~= self.iSize[3] or input:size(4) ~= self.iSize[4]
       or input:size(5) ~= self.iSize[5] then
-      self.iSize = input:size()
+      self.iSize:copy(input:size())
       -- resize gradInput
       self.gradInput:resizeAs(input)
       -- resize output
SpatialConvolution.lua (2 additions, 2 deletions)

@@ -23,6 +23,7 @@ function SpatialConvolution:__init(nInputPlane, nOutputPlane,
       'nOutputPlane should be divisible by nGroups')
    self.weight = torch.Tensor(nOutputPlane, nInputPlane/self.groups, kH, kW)
    self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane/self.groups, kH, kW)
+   self.iSize = torch.LongStorage(4):fill(0)
    self:reset()
    -- should nil for serialization, the reset will still work
    self.reset = nil
@@ -99,11 +100,10 @@ function SpatialConvolution:createIODescriptors(input)
       batch = false
    end
    assert(input:dim() == 4 and input:isContiguous());
-   self.iSize = self.iSize or torch.LongStorage(4):fill(0)
    if not self.iDesc or not self.oDesc or
       input:size(1) ~= self.iSize[1] or input:size(2) ~= self.iSize[2]
       or input:size(3) ~= self.iSize[3] or input:size(4) ~= self.iSize[4] then
-      self.iSize = input:size()
+      self.iSize:copy(input:size())

       assert(self.nInputPlane == input:size(2), 'input has to contain: '
          .. self.nInputPlane
SpatialCrossMapLRN.lua (2 additions, 1 deletion)

@@ -8,6 +8,7 @@ function LRN:__init(size, alpha, beta, k)
    self.alpha = alpha or 1e-4
    self.beta = beta or 0.75
    self.k = k or 1.0
+   self.iSize = torch.LongStorage(4):fill(0)
    assert(self.size >= 1 and self.size <= 16, "size has to be between 1 and 16")
    assert(self.k >= 1e-5, "k has to be greater than 1e-5")
    assert(self.beta >= 0.01, "Beta has to be > 0.01")
@@ -35,7 +36,7 @@ function LRN:createIODescriptors(input)
    if not self.iDesc or
       input:size(1) ~= self.iSize[1] or input:size(2) ~= self.iSize[2]
       or input:size(3) ~= self.iSize[3] or input:size(4) ~= self.iSize[4] then
-      self.iSize = input:size()
+      self.iSize:copy(input:size())
       self.gradInput:resizeAs(input)
       self.output:resizeAs(input)

SpatialDivisiveNormalization.lua (2 additions, 1 deletion)

@@ -8,6 +8,7 @@ function DivisiveNorm:__init(size, alpha, beta, K)
    self.alpha = alpha or 1e-4
    self.beta = beta or 0.75
    self.K = K or 2.0
+   self.iSize = torch.LongStorage(4):fill(0)
    assert(self.size >= 1 and self.size <= 16, "size has to be between 1 and 16")
    assert(self.K >= 1e-5, "K has to be greater than 1e-5")
    assert(self.beta >= 0.01, "Beta has to be > 0.01")
@@ -35,7 +36,7 @@ function DivisiveNorm:createIODescriptors(input)
    if not self.iDesc or
       input:size(1) ~= self.iSize[1] or input:size(2) ~= self.iSize[2]
       or input:size(3) ~= self.iSize[3] or input:size(4) ~= self.iSize[4] then
-      self.iSize = input:size()
+      self.iSize:copy(input:size())
       self.gradInput:resizeAs(input)
       self.output:resizeAs(input)

SpatialFullConvolution.lua (7 additions, 3 deletions)

@@ -8,6 +8,11 @@ autotunerCache[1] = {} -- forward
 autotunerCache[2] = {} -- backwardFilter
 autotunerCache[3] = {} -- backwardData

+function SpatialFullConvolution:__init(...)
+   parent.__init(self, ...)
+   self.iSize = torch.LongStorage(4):fill(0)
+end
+
 -- if you change the configuration of the module manually, call this
 function SpatialFullConvolution:resetWeightDescriptors()
    assert(torch.typename(self.weight) == 'torch.CudaTensor',
@@ -69,11 +74,10 @@ function SpatialFullConvolution:createIODescriptors(input)
       batch = false
    end
    assert(input:dim() == 4 and input:isContiguous());
-   self.iSize = self.iSize or torch.LongStorage(4):fill(0)
    if not self.iDesc or not self.oDesc or
       input:size(1) ~= self.iSize[1] or input:size(2) ~= self.iSize[2]
       or input:size(3) ~= self.iSize[3] or input:size(4) ~= self.iSize[4] then
-      self.iSize = input:size()
+      self.iSize:copy(input:size())

       -- resize gradInput
       if self.gradInput then self.gradInput:resizeAs(input); end
@@ -309,7 +313,7 @@ function SpatialFullConvolution:updateOutput(input)
    self:createIODescriptors(input)

    -- Because SpatialFullConvolution is performing the adjoint of the forward
-   -- convolution operator, we need to swap the forward and backward passes.
+   -- convolution operator, we need to swap the forward and backward passes.
    errcheck('cudnnConvolutionBackwardData', cudnn.getHandle(),
       one:data(),
       self.weightDesc[0], self.weight:data(),
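
The comment kept in the last hunk above carries the one piece of reasoning in this diff worth unpacking. A worked sketch in standard transposed-convolution algebra (background material, not text from the commit):

    % Why SpatialFullConvolution's forward pass calls the backward-data
    % kernel: treat the ordinary convolution as a linear map,
    %   y = C x ,
    % where C is the matrix induced by the kernel.  Backpropagation
    % through that map multiplies by the transpose:
    %   \frac{\partial L}{\partial x} = C^{\top} \frac{\partial L}{\partial y} .
    % The "full" (transposed) convolution is the adjoint map
    %   \hat{x} = C^{\top} y ,
    % which is the same computation as the backward-data pass, so
    % cudnnConvolutionBackwardData implements updateOutput directly.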
SpatialSoftMax.lua (2 additions, 2 deletions)

@@ -8,13 +8,13 @@ function SpatialSoftMax:__init(fast)
    else
       self.algorithm = 'CUDNN_SOFTMAX_ACCURATE'
    end
+   self.iSize = torch.LongStorage(4):fill(0)
 end

 function SpatialSoftMax:createIODescriptors(input)
    self.mode = self.mode or 'CUDNN_SOFTMAX_MODE_CHANNEL'
    -- after converting from nn use accurate
    self.algorithm = self.algorithm or 'CUDNN_SOFTMAX_ACCURATE'
-   self.iSize = self.iSize or torch.LongStorage(4):fill(0)

    local batch = true
    local singleDim = false
@@ -34,7 +34,7 @@ function SpatialSoftMax:createIODescriptors(input)
    if not self.iDesc or not self.oDesc or
       input:size(1) ~= self.iSize[1] or input:size(2) ~= self.iSize[2]
       or input:size(3) ~= self.iSize[3] or input:size(4) ~= self.iSize[4] then
-      self.iSize = input:size()
+      self.iSize:copy(input:size())
       self.gradInput:resizeAs(input)
       self.output:resizeAs(input)
       self.iDesc = cudnn.toDescriptor(input)
TemporalConvolution.lua (2 additions, 0 deletions)

@@ -19,6 +19,7 @@ function TemporalConvolution:__init(inputFrameSize, outputFrameSize,
    self.gradWeight = self.gradWeight:view(outputFrameSize, inputFrameSize*kH)
    --self.dW and self.kW now have different meaning than in nn.TemporalConvolution, because
    --W and H are switched in temporal and spatial
+   self.iSize = torch.LongStorage(4):fill(0)
 end

 function TemporalConvolution:createIODescriptors(input)
@@ -27,6 +28,7 @@ function TemporalConvolution:createIODescriptors(input)
       input:size(1) ~= self.iSize[1] or input:size(2) ~= self.iSize[2]
       or input:size(3) ~= self.iSize[3] or input:size(4) ~= self.iSize[4] then
       sizeChanged = true
+      self.iSize:copy(input:size())
    end
    cudnn.SpatialConvolution.createIODescriptors(self,input)
    if sizeChanged then
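
The in-code comment about W and H being switched refers to cudnn.TemporalConvolution riding on the 2-D spatial convolution path, which is also why it caches a 4-D iSize for conceptually 3-D sequence input. A hedged sketch of the idea (the reshape below is illustrative; the module's actual internal layout may differ):

    require 'torch'

    local batch, nFrames, frameSize = 2, 10, 8
    local x = torch.randn(batch, nFrames, frameSize)   -- 3-D sequence input
    -- viewed as a 4-D "image" so a spatial convolution can process it;
    -- one kernel dimension then walks over time
    local x4d = x:view(batch, 1, nFrames, frameSize)
    print(x4d:size())  -- 2 x 1 x 10 x 8: four dims, matching LongStorage(4)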
VolumetricConvolution.lua (8 additions, 4 deletions)

@@ -3,6 +3,11 @@ local VolumetricConvolution, parent
 local ffi = require 'ffi'
 local errcheck = cudnn.errcheck

+function VolumetricConvolution:__init(...)
+   parent.__init(self, ...)
+   self.iSize = torch.LongStorage(5):fill(0)
+end
+
 -- if you change the configuration of the module manually, call this
 function VolumetricConvolution:resetWeightDescriptors()
    assert(torch.typename(self.weight) == 'torch.CudaTensor',
@@ -30,7 +35,7 @@ end
 function VolumetricConvolution:fastest(mode)
    if mode == nil then mode = true end
    self.fastest_mode = mode
-   self.iSize = self.iSize or torch.LongStorage(4)
+   self.iSize = self.iSize or torch.LongStorage(5)
    self.iSize:fill(0)
    return self
 end
@@ -45,7 +50,7 @@ function VolumetricConvolution:setMode(fmode, bdmode, bwmode)
    if bwmode ~= nil then
       self.bwmode = bwmode
    end
-   self.iSize = self.iSize or torch.LongStorage(4)
+   self.iSize = self.iSize or torch.LongStorage(5)
    self.iSize:fill(0)
    return self
 end
@@ -65,12 +70,11 @@ function VolumetricConvolution:createIODescriptors(input)
       batch = false
    end
    assert(input:dim() == 5 and input:isContiguous());
-   self.iSize = self.iSize or torch.LongStorage(4):fill(0)
    if not self.iDesc or not self.oDesc or
       input:size(1) ~= self.iSize[1] or input:size(2) ~= self.iSize[2]
       or input:size(3) ~= self.iSize[3] or input:size(4) ~= self.iSize[4]
       or input:size(5) ~= self.iSize[5] then
-      self.iSize = input:size()
+      self.iSize:copy(input:size())
       -- resize gradInput
       if self.gradInput then self.gradInput:resizeAs(input); end
       -- create input descriptor
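
Beyond the shared iSize pattern, this file also fixes a latent bug: the fallback storages in fastest() and setMode() held 4 elements, but volumetric inputs are 5-D (batch x channels x depth x height x width), and the size test indexes iSize[5]. A quick standalone illustration:

    require 'torch'

    local iSize = torch.LongStorage(5):fill(0)
    local input = torch.randn(2, 3, 8, 16, 16)   -- N x C x D x H x W
    print(input:size(5) ~= iSize[5])  -- true on the first pass: zeros never
                                      -- match, so descriptors are created
    iSize:copy(input:size())
    print(input:size(5) ~= iSize[5])  -- false afterwards: descriptors reused
    -- with the old LongStorage(4), reading iSize[5] would raise an
    -- index-out-of-bound error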
