tensorflow: "Init node weights/Assign doesn't exist in graph" happens when use convert in tflite

System information

  • OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Linux Ubuntu 18.04
  • TensorFlow installed from (source or binary):
  • TensorFlow version: TensorFlow nightly
  • Python version: 3.6
  • Installed using virtualenv? pip? conda?: pip
  • CUDA/cuDNN version: 7/10

Describe the problem

When I tried to convert a TensorFlow GraphDef into a TensorFlow Lite FlatBuffer from a tf.Session object, an error happened like this:

2019-08-14 16:01:23.946453: I tensorflow/core/grappler/clusters/single_machine.cc:356] Starting new session
2019-08-14 16:01:23.947157: E tensorflow/core/grappler/grappler_item_builder.cc:656] Init node weights/Assign doesn't exist in graph

My code is all shown below:

def main(_):

  def loss_function(weight, logits, labels):
    labels = tf.one_hot(labels,4)
    labels = tf.cast(labels, tf.float32)
    first = tf.reduce_sum(tf.multiply(-labels, logits),1)
    second_0 = tf.add(tf.exp(logits[:,0]),tf.exp(logits[:,1]))
    second_1 = tf.add(tf.exp(logits[:,2]),tf.exp(logits[:,3]))
    log = tf.log(tf.add(second_1,second_0))
    weight = tf.transpose(tf.reduce_sum(tf.multiply(labels, weight),1))
    output = tf.multiply(weight,tf.add(first,log))

    return output

  def normalize(stft):
    stft_1 = numpy.empty([stft.shape[0],128,128])
    stft_2 = numpy.empty([stft_1.shape[0],stft_1.shape[1],stft_1.shape[2],1])
    for i in range(stft_1.shape[0]):
      image = Image.fromarray(stft[i,:,:])
      image = image.resize([128,128])
      stft_1[i,:,:] = numpy.array(image)

      min = numpy.min(stft_1[i,:,:])
      max = numpy.max(stft_1[i,:,:])
      stft_1[i,:,:] = (stft_1[i,:,:]-min)/(max-min)
      stft_2[i,:,:,:] = stft_1[i,:,:].reshape((stft_1.shape[1],stft_1.shape[2],1))
    return stft_2  
  # Get the data.

  stft_training, mfcc_training, labels_training = joblib.load(open(FLAGS.input, mode='rb'))
  stft_test, mfcc_test, labels_test = joblib.load(open(FLAGS.test, mode='rb'))

  stft_test = numpy.array(stft_test)
  mfcc_test = numpy.array(mfcc_test)
  labels_test = numpy.array(labels_test)
  stft_test = normalize(stft_test)
  mfcc_test = normalize(mfcc_test)

  stft_training = numpy.array(stft_training)
  mfcc_training = numpy.array(mfcc_training)
  labels_training = numpy.array(labels_training)
  stft_training = normalize(stft_training)
  mfcc_training = normalize(mfcc_training)

  stft_shape = stft_training.shape
  stft_shape = (None, stft_shape[1], stft_shape[2], 1)

  mfcc_shape = mfcc_training.shape
  mfcc_shape = (None, mfcc_shape[1], mfcc_shape[2], 1)

  labels_shape = labels_training.shape
  labels_shape = (None,)

  stft_placeholder = tf.placeholder(stft_training.dtype, stft_shape)
  labels_placeholder = tf.placeholder(labels_training.dtype, labels_shape)
  mfcc_placeholder = tf.placeholder(mfcc_training.dtype, mfcc_shape)

  dataset_training = tf.data.Dataset.from_tensor_slices((stft_placeholder, mfcc_placeholder, labels_placeholder))
  dataset_training = dataset_training.apply(
      tf.data.experimental.shuffle_and_repeat(len(stft_training), None))
  dataset_training = dataset_training.batch(BATCH_SIZE)
  dataset_training = dataset_training.prefetch(1)
  iterator_training = dataset_training.make_initializable_iterator()
  next_element_training = iterator_training.get_next()
  num_epochs = FLAGS.epochs
  train_size = labels_training.shape[0]

  with tf.name_scope('input'):
    stft = tf.placeholder(
        name="stft",
        dtype=data_type(),
        shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WEITH, NUM_CHANNELS))
    mfcc = tf.placeholder(
        name="mfcc",
        dtype=data_type(),
        shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WEITH, NUM_CHANNELS))
    labels = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))

  with tf.name_scope('test_input'):
    stft_t = tf.placeholder(
        data_type(),
        shape=(EVAL_BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WEITH, NUM_CHANNELS))
    mfcc_t = tf.placeholder(
        data_type(),
        shape=(EVAL_BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WEITH, NUM_CHANNELS))

  model = BRN()
  logits = model.forward(stft, mfcc)
  logits_ = tf.add(0.,logits,name="logits_")
  try:
    # Old (pre-1.0) summary API; fall back to tf.summary on newer versions.
    scalar_summary = tf.scalar_summary
    SummaryWrite = tf.train.SummaryWriter
    merge_summary = tf.merge_summary
  except AttributeError:
    scalar_summary = tf.summary.scalar
    SummaryWrite = tf.summary.FileWriter
    merge_summary = tf.summary.merge
  with tf.name_scope('loss'):
    weights = [1.0, 1.7, 4.1, 5.7]
    mid = loss_function(weights, logits=logits, labels=labels)
    loss = tf.reduce_sum(mid)

    loss_summary = scalar_summary('loss', loss)
    regularizers = (tf.nn.l2_loss(model.conv1_weights) + tf.nn.l2_loss(model.conv2_weights) +
                tf.nn.l2_loss(model.fc_weights) + tf.nn.l2_loss(model.fc_biases))

    batch = tf.Variable(0, dtype=data_type())

  with tf.name_scope('train'):

    optimizer = tf.train.AdamOptimizer(0.001).minimize(loss)
  train_prediction = tf.nn.softmax(logits)
  eval_prediction = tf.nn.softmax(model.forward(stft_t, mfcc_t))
  start_time = time.time()

  def eval_in_batches(stft_data, mfcc_data, sess, type):
    size = stft_data.shape[0]
    if size < EVAL_BATCH_SIZE:
      raise ValueError("batch size for evals larger than dataset: %d" % size)
    predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32)
    for begin in xrange(0, size, EVAL_BATCH_SIZE):
      end = begin + EVAL_BATCH_SIZE
      if end <= size:
        if type == 'train':
          predictions[begin:end, :] = sess.run(
              train_prediction,
              feed_dict={stft: stft_data[begin:end, ...], mfcc: mfcc_data[begin:end, ...]})
        else: 
          predictions[begin:end, :] = sess.run(
              eval_prediction,
              feed_dict={stft_t: stft_data[begin:end, ...], mfcc_t: mfcc_data[begin:end, ...]})
      else:
        if type == 'train':
          batch_predictions = sess.run(
              train_prediction,
              feed_dict={stft: stft_data[-EVAL_BATCH_SIZE:, ...], mfcc: mfcc_data[-EVAL_BATCH_SIZE:, ...]})
        else:
          batch_predictions = sess.run(
              eval_prediction,
              feed_dict={stft_t: stft_data[-EVAL_BATCH_SIZE:, ...], mfcc_t: mfcc_data[-EVAL_BATCH_SIZE:, ...]})
        predictions[begin:, :] = batch_predictions[begin - size:, :]
    return predictions


  config = tf.ConfigProto()
  config.gpu_options.allow_growth = True  

  with tf.Session(config=config) as sess:

    tf.global_variables_initializer().run()

    merged = tf.summary.merge_all()
    writer = SummaryWrite(FLAGS.logs + 'train', sess.graph)
    sess.run(iterator_training.initializer, feed_dict={stft_placeholder:stft_training,
                      mfcc_placeholder:mfcc_training,
                     labels_placeholder:labels_training})

    for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):

      batch_stft, batch_mfcc, batch_labels = sess.run(next_element_training)

      feed_dict = {stft: batch_stft,
                   mfcc: batch_mfcc,
                   labels: batch_labels}
      sess.run(optimizer, feed_dict=feed_dict)
      if step % EVAL_FREQUENCY == 0:

        summary, l = sess.run([merged, loss], feed_dict=feed_dict)
        writer.add_summary(summary, step)
        elapsed_time = time.time() - start_time
        start_time = time.time()
        rate, acc = error_rate(eval_in_batches(stft_training, mfcc_training, sess, 'train'), labels_training)
        acc_summary = scalar_summary('accuracy', acc)
        print('Step %d (epoch %.2f), Minibatch loss: %.3f, Minibatch error: %.1f%%, Accuracy:%.4f' %
          (step, float(step) * BATCH_SIZE / train_size,
          l,rate, acc))
        sys.stdout.flush()
        test_error, test_acc = error_rate(eval_in_batches(stft_test, mfcc_test, sess, 'test'), labels_test)
        print('Testset error: %.1f%%, Accuracy:%.4f' % (test_error, test_acc))

    converter = tf.lite.TFLiteConverter.from_session(sess, [stft, mfcc], [logits_])
    tflite_model = converter.convert()
    open("BRN.tflite", "wb").write(tflite_model)

    writer.close()

When I run the official demo of converting a TensorFlow GraphDef into a TensorFlow Lite FlatBuffer from a tf.Session object, the error also happens. Is that OK? I mean, can I use the trained weights in TensorFlow Lite, or does the file not save the weights?
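
For reference, the "official demo" is presumably the from_session example from the TF 1.x TFLiteConverter docs, roughly the sketch below. Note that the demo's variable is literally named "weights", so the weights/Assign node mentioned in the grappler message would be exactly its init node:

import tensorflow as tf

img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3))
var = tf.get_variable("weights", dtype=tf.float32, shape=(1, 64, 64, 3))
out = tf.identity(img + var, name="out")

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # Convert directly from the live session; the converter freezes the variables itself.
  converter = tf.lite.TFLiteConverter.from_session(sess, [img], [out])
  tflite_model = converter.convert()
  open("demo.tflite", "wb").write(tflite_model)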

About this issue

  • Original URL
  • State: closed
  • Created 5 years ago
  • Comments: 19 (8 by maintainers)

Most upvoted comments

@mmmmayi I am currently facing the same issue, have you found a solution? Thanks!

Hi, actually I didn’t figure it out, but it seems to have no effect on the result, because I can still get the tflite model even with this error.
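
One way to check whether the trained weights actually made it into the FlatBuffer is to load it with the TFLite Python interpreter and run an inference. A minimal sketch, assuming a BRN.tflite produced by the code above (random arrays stand in for real STFT/MFCC batches):

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="BRN.tflite")
interpreter.allocate_tensors()

# Feed dummy data with the shapes and dtypes recorded by the converter.
for detail in interpreter.get_input_details():
  dummy = np.random.random_sample(tuple(detail['shape'])).astype(detail['dtype'])
  interpreter.set_tensor(detail['index'], dummy)

interpreter.invoke()
logits = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])
print(logits.shape, logits)

If the same inputs produce (approximately) the same logits as running logits_ through the original session, the weights were exported despite the init-node message.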

I just fixed this problem by using TF 1.13.1 instead of TF 1.14…
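
Besides pinning the TF version, another workaround sometimes suggested for this grappler message is to freeze the variables into constants yourself and convert the frozen GraphDef, so no Assign init nodes remain for grappler to look for. A minimal sketch against the code above; the input node names input/stft and input/mfcc are an assumption based on the name_scope('input') around the placeholders:

# Run inside the tf.Session block, after training has finished.
frozen_graph_def = tf.graph_util.convert_variables_to_constants(
    sess, sess.graph_def, output_node_names=["logits_"])
tf.train.write_graph(frozen_graph_def, ".", "BRN_frozen.pb", as_text=False)

converter = tf.lite.TFLiteConverter.from_frozen_graph(
    "BRN_frozen.pb",
    input_arrays=["input/stft", "input/mfcc"],
    output_arrays=["logits_"])
tflite_model = converter.convert()
with open("BRN.tflite", "wb") as f:
  f.write(tflite_model)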