keras: 'ValueError: Dimensions must be equal, but are 32 and 127 for 'replica_0/model_1/add_17/add' (op: 'Add') with input shapes: [?,32,32,256], [?,127,127,256].' when use multi_gpu
keras-2.2.4, tensorflow-gpu 1.10.1
here is my network structure
from keras.models import Model
from keras.layers import Conv2D, BatchNormalization, Activation, Lambda, add, Input, concatenate
from keras import backend as K
from keras.applications.resnet50 import ResNet50
def conv_bn_relu(feature_map, filters, kernel, activation=True):
feature_map = Conv2D(filters, kernel, padding='same')(feature_map)
feature_map = BatchNormalization()(feature_map)
if activation:
feature_map = Activation('relu')(feature_map)
return feature_map
def bottleneck(inputs, depth, depth_bottleneck, stride=1):
residual = conv_bn_relu(inputs, depth_bottleneck, (1, 1))
residual = conv_bn_relu(residual, depth_bottleneck, (3, 3))
residual = conv_bn_relu(residual, depth, (1, 1), activation=False)
shortcut = conv_bn_relu(inputs, depth, [1, 1])
output = Activation('relu')(add([residual, shortcut]))
return output
def global_net(feature_maps, point_num):
global_feature_maps = []
global_outputs = []
for i, feature_map in enumerate(reversed(feature_maps)):
feature_map = conv_bn_relu(feature_map, 256, (1, 1))
if 'last_feature_map' in dir():
shape = feature_map.get_shape()
upsample = Lambda(lambda x: K.tf.image.resize_bilinear(x, (shape[1], shape[2])))(feature_map)
upsample = Conv2D(256, (1, 1), padding='same')(upsample)
last_feature_map = add([feature_map, upsample])
else:
last_feature_map = feature_map
tmp = conv_bn_relu(last_feature_map, 256, (1, 1))
out = conv_bn_relu(tmp, point_num, (3, 3))
out = Lambda(lambda x: K.tf.image.resize_bilinear(x, (128, 128)))(out)
global_feature_maps.append(last_feature_map)
global_outputs.append(out)
return global_feature_maps[::-1], global_outputs[::-1]
def refine_net(feature_maps, point_num):
refine_feature_maps = []
for i, feature_map in enumerate(feature_maps):
for j in range(i):
feature_map = bottleneck(feature_map, 256, 128)
feature_map = Lambda(lambda x: K.tf.image.resize_bilinear(x, (128, 128)))(feature_map)
refine_feature_maps.append(feature_map)
refine_feature_map = Lambda(lambda x:K.tf.concat(x, axis=3))(refine_feature_maps)
refine_feature_map = bottleneck(refine_feature_map, 256, 128)
res = conv_bn_relu(refine_feature_map, point_num, (3, 3))
return res
def build_cpn(style):
point_num = 13
backbone = ResNet50(weights='imagenet', input_shape=(512, 512, 3), include_top=False)
resnet_feature_maps = []
for layer_name in ['activation_10', 'activation_22', 'activation_40', 'activation_49']:
feature_maps = backbone.get_layer(layer_name).output
resnet_feature_maps.append(feature_maps)
global_feature_maps, global_outputs = global_net(resnet_feature_maps, point_num)
refine_output = refine_net(global_feature_maps, point_num)
cpn_outputs = global_outputs + [refine_output]
cpn_outputs = concatenate(cpn_outputs)
cpn = Model(inputs=backbone.input, outputs=cpn_outputs)
return cpn
About this issue
- Original URL
- State: closed
- Created 6 years ago
- Comments: 17 (8 by maintainers)
I have the same. Cut off pretty much everything down to a single layer, changed sizes - nothing works.
code:
error:
I don’t have a multi gpu setup to work on this piece of code. I’ve flagged this issue as bug because I think it needs attention. So hopefully someone will fix it in the future.